void Fisherfaces::predict(InputArray _src, Ptr<PredictCollector> collector, const int state) const {
    Mat src = _src.getMat();
    // Validate model state and input size up front so failures produce clear messages.
    if(_projections.empty()) {
        // Model has not been trained yet -- there is nothing to compare against.
        String error_message = "This Fisherfaces model is not computed yet. Did you call Fisherfaces::train?";
        CV_Error(Error::StsBadArg, error_message);
    } else if(src.total() != (size_t) _eigenvectors.rows) {
        // BUG FIX: src.total() returns size_t; passing it to a %d specifier is
        // undefined behavior on platforms where size_t != int. Cast explicitly.
        String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, (int)src.total());
        CV_Error(Error::StsBadArg, error_message);
    }
    // Project the (flattened) query image into the LDA subspace.
    Mat q = LDA::subspaceProject(_eigenvectors, _mean, src.reshape(1,1));
    // Nearest-neighbor search: emit every (label, distance) pair to the
    // collector, which decides when to stop consuming results.
    collector->init((int)_projections.size(), state);
    for (size_t sampleIdx = 0; sampleIdx < _projections.size(); sampleIdx++) {
        double dist = norm(_projections[sampleIdx], q, NORM_L2);
        int label = _labels.at<int>((int)sampleIdx);
        if (!collector->emit(label, dist, state)) return;
    }
}
TEST_P(GuidedFilterTest, smallParamsIssue)
{
    // Regression test: guidedFilter with a tiny radius/eps must not blow the
    // output out to (near-)white. Fails if more than 10% of pixels are white.
    GFParams params = GetParam();
    string guideFileName = get<1>(params);
    string srcFileName = get<2>(params);
    int guideCnNum = 3;
    int srcCnNum = get<0>(params);

    Mat guide = imread(getOpenCVExtraDir() + guideFileName);
    Mat src = imread(getOpenCVExtraDir() + srcFileName);
    ASSERT_TRUE(!guide.empty() && !src.empty());

    Size dstSize(guide.cols, guide.rows);
    guide = convertTypeAndSize(guide, CV_MAKE_TYPE(guide.depth(), guideCnNum), dstSize);
    src = convertTypeAndSize(src, CV_MAKE_TYPE(src.depth(), srcCnNum), dstSize);
    Mat output;

    ximgproc::guidedFilter(guide, src, output, 3, 1e-6);

    // BUG FIX: Mat::ptr() takes a ROW index, but the original code passed the
    // column counter `i`, reading out of bounds whenever cols > rows. Index
    // rows with `j` and columns with `i`.
    size_t whitePixels = 0;
    for(int i = 0; i < output.cols; i++)
    {
        for(int j = 0; j < output.rows; j++)
        {
            if(output.channels() == 1)
            {
                if(output.ptr<uchar>(j)[i] == 255)
                    whitePixels++;
            }
            else if(output.channels() == 3)
            {
                Vec3b currentPixel = output.ptr<Vec3b>(j)[i];
                if(currentPixel == Vec3b(255, 255, 255))
                    whitePixels++;
            }
        }
    }
    double whiteRate = whitePixels / (double) output.total();
    EXPECT_LE(whiteRate, 0.1);
}
Example #3
0
// Extracts a 5-element shape feature vector from a set of feature points:
// the first four Hu moments of the (annotated) image plus the ratio of the
// point count to the area of the minimum-area bounding rectangle.
// The rectangle outline is drawn onto Input; Output aliases the same image.
// The feature vector is appended to a text file via WriteIntoTxt.
int FeatureExtract(Mat &Input, Mat FeaturePoints, Mat &Output) {

	/*-----------------minAreaRect-----------------------*/

	// Fit the tightest rotated rectangle around the feature points and
	// draw its outline onto the input image.
	RotatedRect bounding = minAreaRect(FeaturePoints);
	Point2f corners[4];
	bounding.points(corners); // fetch the rectangle's four vertices

	for (int k = 0; k < 4; k++)
		line(Input, corners[k], corners[(k + 1) % 4], Scalar(255), 1, CV_AA);

	Output = Input;
	/*------------------Hu Moments----------------------*/
	// Compute the seven Hu invariants; only the first four are used below.
	double hu[7];
	Moments h = moments(Input, false);
	HuMoments(h, hu);

	/*-----------------------Area of Hand------------------*/
	double HandArea = FeaturePoints.total();

	/*-------------------Area of minRect--------------------*/
	// Side lengths of the rotated rectangle from adjacent vertex pairs.
	double dx1 = corners[0].x - corners[1].x, dy1 = corners[0].y - corners[1].y;
	double dx2 = corners[1].x - corners[2].x, dy2 = corners[1].y - corners[2].y;
	double leng = sqrt(dx1 * dx1 + dy1 * dy1);
	double width = sqrt(dx2 * dx2 + dy2 * dy2);

	double MinrectArea = leng * width;

	// Fraction of the bounding rectangle covered by the hand points.
	double ratio = HandArea / MinrectArea;

	double feature[5] = { hu[0], hu[1], hu[2], hu[3], ratio };

	/*----------------write into a txt file-------------------*/
	WriteIntoTxt(feature);

	return 1;
}
Example #4
0
// Computes a 1-D histogram of `src` over [minVal, maxVal] with one bin per
// integer value, returned as a single-row matrix. When `normed` is set, the
// bin counts are divided by the total number of elements in `src`.
static Mat histc_(const Mat& src, int minVal=0, int maxVal=255, bool normed=false)
{
    // One bin per integer value in the inclusive range.
    const int binCount = maxVal - minVal + 1;

    // calcHist uses a half-open range, hence maxVal + 1 as the upper bound.
    float range[] = { static_cast<float>(minVal), static_cast<float>(maxVal + 1) };
    const float* ranges = range;

    Mat hist;
    calcHist(&src, 1, 0, Mat(), hist, 1, &binCount, &ranges, true, false);

    // Optionally normalize to relative frequencies.
    if (normed)
        hist /= (int)src.total();

    return hist.reshape(1, 1);
}
Example #5
0
// Alpha-blends two single-channel 8-bit images in place:
// dst = (src*alpha + dst*(255-alpha)) >> 8 (NEON path approximates /255).
void alphaBlendC1_optimized(const Mat& src, Mat& dst, const Mat& alpha)
{
    // BUG FIX: the original assert chained the comparisons as
    // `src.type() == dst.type() == alpha.type() == CV_8UC1`, which compares a
    // bool against a type id and therefore accepted mismatched inputs.
    // Check each matrix type explicitly.
    CV_Assert(src.type() == CV_8UC1 && dst.type() == CV_8UC1 && alpha.type() == CV_8UC1 &&
              src.isContinuous() && dst.isContinuous() && alpha.isContinuous() &&
              (src.cols % 8 == 0) &&
              (src.cols == dst.cols) && (src.cols == alpha.cols));
    
#if !defined(__ARM_NEON__) || !USE_NEON
    // NEON unavailable: fall back to the scalar implementation.
    alphaBlendC1(src, dst, alpha);
#else
    uchar* pSrc = src.data;
    uchar* pDst = dst.data;
    uchar* pAlpha = alpha.data;
    // Process 8 pixels per iteration; size_t avoids the signed/unsigned
    // comparison against total().
    for(size_t i = 0; i < src.total(); i+=8, pSrc+=8, pDst+=8, pAlpha+=8)
    {
        //load
        uint8x8_t vsrc = vld1_u8(pSrc);
        uint8x8_t vdst = vld1_u8(pDst);
        uint8x8_t valpha = vld1_u8(pAlpha);
        uint8x8_t v255 = vdup_n_u8(255);
        
        // source * alpha (widened to 16 bit)
        uint16x8_t mult1 = vmull_u8(vsrc, valpha);
        
        // destination * (255 - alpha)
        uint8x8_t tmp = vsub_u8(v255, valpha);
        uint16x8_t mult2 = vmull_u8(tmp, vdst);
        
        //add them
        uint16x8_t sum = vaddq_u16(mult1, mult2);
        
        //take upper bytes, emulation /255
        uint8x8_t out = vshrn_n_u16(sum, 8);
        
        // store to the memory
        vst1_u8(pDst, out);
    }
#endif
}
Example #6
0
//------------------------------------------------------------------------------
// cv::subspace::reconstruct
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// cv::subspace::reconstruct
//
// Reconstructs samples from their subspace projection: X = src * W^T, with
// the mean added back when its element count matches the sample dimension.
//------------------------------------------------------------------------------
Mat cv::subspace::reconstruct(InputArray _W, InputArray _mean, InputArray _src) {
    Mat W = _W.getMat();
    Mat mean = _mean.getMat();
    Mat src = _src.getMat();
    const int n = src.rows; // number of samples
    const int d = src.cols; // dimension of each projected sample
    // Work in the basis matrix's element type.
    Mat Y;
    src.convertTo(Y, W.type());
    // Add the mean back only when its size is consistent with the data.
    const bool addMean = (d == mean.total());
    Mat X;
    gemm(Y, W, 1.0,
         addMean ? repeat(mean.reshape(1,1), n, 1) : Mat(),
         addMean ? 1.0 : 0.0,
         X,
         GEMM_2_T);
    return X;
}
// Computes the PSNR between I1 and I2 on the GPU, reusing the scratch
// buffers in `b` so repeated calls avoid reallocation. Returns 0 when the
// images are (numerically) identical instead of +infinity.
double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b)
{
    b.gI1.upload(I1);
    b.gI2.upload(I2);

    // Promote to float so squaring cannot overflow 8-bit values.
    b.gI1.convertTo(b.t1, CV_32F);
    b.gI2.convertTo(b.t2, CV_32F);

    // Squared per-element difference over a flattened single-channel view.
    gpu::absdiff(b.t1.reshape(1), b.t2.reshape(1), b.gs);
    gpu::multiply(b.gs, b.gs, b.gs);

    const double sse = gpu::sum(b.gs, b.buf)[0];

    // For vanishingly small error, report zero rather than a huge PSNR.
    if (sse <= 1e-10)
        return 0;

    const double mse = sse / (double)(I1.channels() * I1.total());
    return 10.0 * log10((255 * 255) / mse);
}
// Splits the pixels selected by `cluster` into two sub-clusters according to
// the sign of their projection onto the dominant PCA direction of the
// (jointCn - etaFull) differences.
void AdaptiveManifoldFilterN::computeClusters(Mat1b& cluster, Mat1b& cluster_minus, Mat1b& cluster_plus)
{
    // Build an (N x jointCnNum) sample matrix of per-pixel channel differences.
    Mat diffSamples;
    {
        vector<Mat> channelDiffs(jointCnNum);
        for (int c = 0; c < jointCnNum; c++)
            subtract(jointCn[c], etaFull[c], channelDiffs[c]);

        merge(channelDiffs, diffSamples);
        diffSamples = diffSamples.reshape(1, (int)diffSamples.total());
        CV_DbgAssert(diffSamples.cols == jointCnNum);
    }

    // Seed vector for the power iteration: random, or alternating +/-0.5.
    Mat1f initVec(1, jointCnNum);
    if (useRNG)
    {
        rnd.fill(initVec, RNG::UNIFORM, -0.5, 0.5);
    }
    else
    {
        for (int k = 0; k < (int)initVec.total(); k++)
            initVec(0, k) = (k % 2 == 0) ? 0.5f : -0.5f;
    }

    // Dominant eigenvector of the difference samples.
    Mat1f eigenVec(1, jointCnNum);
    computeEigenVector(diffSamples, cluster, eigenVec, num_pca_iterations_, initVec);

    // Signed projection of every pixel onto the eigenvector, reshaped back
    // to image geometry.
    Mat1f projection;
    gemm(diffSamples, eigenVec, 1, noArray(), 0, projection, GEMM_2_T);
    projection = projection.reshape(1, srcSize.height);
    CV_DbgAssert(projection.size() == srcSize);

    // Negative projections, restricted to the active cluster.
    compare(projection, 0, cluster_minus, CMP_LT);
    bitwise_and(cluster_minus, cluster, cluster_minus);

    // Non-negative projections, restricted to the active cluster.
    compare(projection, 0, cluster_plus, CMP_GE);
    bitwise_and(cluster_plus, cluster, cluster_plus);
}
Example #9
0
/**
 * This is the streaming client, run as separate thread.
 * Repeatedly sends the shared frame `img1` over the socket passed in `arg`
 * whenever the producer thread has set `is_data_ready`.
 */
void* sendData(void* arg)
{
    /* make this thread cancellable using pthread_cancel() */
    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
    
    int *clientSock = (int*)arg;
    
    int  bytes=0;
    
    /* start sending images */
    while(1)
    {
        /* BUG FIX: is_data_ready is shared with the producer thread; reading
         * it without holding gmutex was a data race. Take the lock before
         * checking the flag, and release it after clearing the flag. */
        pthread_mutex_lock(&gmutex);
        if (is_data_ready) {
            int  imgSize = img1.total()*img1.elemSize();
            cout << "\n--> Transferring  (" << img1.cols << "x" << img1.rows << ")  images to the:  " << server_ip << ":" << server_port << endl;
            if ((bytes = send(*clientSock, img1.data, imgSize, 0)) < 0){
                perror("send failed");
                cerr << "\n--> bytes = " << bytes << endl;
                quit("\n--> send() failed", 1);
            }
            is_data_ready = 0;
        }
        pthread_mutex_unlock(&gmutex);
        
        /* have we terminated yet? */
        pthread_testcancel();
        
        /* no, take a rest for a while */
        usleep(1000);   //1000 Micro Sec
    }
    return 0;
}
double getPSNR(const Mat &I1, const Mat &I2) {
    Mat s1;
    absdiff(I1, I2, s1);       // |I1 - I2|
    s1.convertTo(s1, CV_32F);  // cannot make a square on 8 bits
    s1 = s1.mul(s1);           // |I1 - I2|^2

    Scalar s = sum(s1);         // sum elements per channel

    double sse = s.val[0] + s.val[1] + s.val[2]; // sum channels
    vector<vector<Point> > contours;
    Mat tmp;
    cvtColor(I2, tmp, CV_RGB2GRAY);
    findContours(tmp, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    double area = contours.size() > 0 ? contourArea(contours[0]) : I1.total();

    if (sse <= 1e-10) // for small values return zero
        return 0;
    else {
        double mse = sse / (I1.channels() * area);
        double psnr = 10.0 * log10((255 * 255) / mse);
        return psnr;
    }
}
TEST_P(DomainTransformTest, MultiThreadReproducibility)
{
    // Verifies that dtFilter produces (near-)identical results with one
    // thread and with all available threads. Meaningless on a single core.
    if (cv::getNumberOfCPUs() == 1)
        return;

    const double MAX_DIF = 1.0;
    const double MAX_MEAN_DIF = 1.0 / 256.0;
    const int loopsCount = 2;
    RNG rng(0);

    DTParams params = GetParam();
    Size size = get<0>(params);
    int mode = get<1>(params);
    int guideType = get<2>(params);
    int srcType = get<3>(params);

    Mat original = imread(getOpenCVExtraDir() + "cv/edgefilter/statue.png");
    Mat guide = convertTypeAndSize(original, guideType, size);
    Mat src = convertTypeAndSize(original, srcType, size);

    // Run with random sigmas each iteration, in both threading modes.
    for (int iter = 0; iter <= loopsCount; iter++)
    {
        double sigmaSpatial = rng.uniform(0.0, 100.0);
        double sigmaColor = rng.uniform(0.0, 100.0);

        cv::setNumThreads(cv::getNumberOfCPUs());
        Mat parallelResult;
        dtFilter(guide, src, parallelResult, sigmaSpatial, sigmaColor, mode);

        cv::setNumThreads(1);
        Mat serialResult;
        dtFilter(guide, src, serialResult, sigmaSpatial, sigmaColor, mode);

        // Bounded max difference and bounded mean difference.
        EXPECT_LE(cv::norm(serialResult, parallelResult, NORM_INF), MAX_DIF);
        EXPECT_LE(cv::norm(serialResult, parallelResult, NORM_L1), MAX_MEAN_DIF*src.total());
    }
}
Example #12
0
    // Weighted HDR merge: combines the exposure stack `src` (8-bit images)
    // into a radiance map in `dst`, using per-image exposure `_times` (read
    // as float) and the camera response curve `input_response` (a linear,
    // mid-range-normalized response is assumed when empty).
    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response)
    {
        std::vector<Mat> images;
        src.getMatVector(images);
        Mat times = _times.getMat();

        CV_Assert(images.size() == times.total());
        checkImageDimensions(images);
        CV_Assert(images[0].depth() == CV_8U);

        int channels = images[0].channels();
        int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

        dst.create(images[0].size(), CV_32FCC);
        Mat result = dst.getMat();

        Mat response = input_response.getMat();
        if(response.empty()) {
            // No response provided: linear response normalized to mid-range.
            float middle = LDR_SIZE / 2.0f;
            response = linearResponse(channels) / middle;
        }
        CV_Assert(response.rows == LDR_SIZE && response.cols == 1 &&
                  response.channels() == channels);

        // BUG FIX: the original code reassigned `result = Mat::zeros(...)`,
        // which detached the header from the memory owned by `dst`, so the
        // caller's output matrix was never written. Accumulate into a local
        // matrix and copy it into dst's storage at the end.
        Mat acc = Mat::zeros(images[0].size(), CV_32FCC);
        Mat wsum = Mat::zeros(images[0].size(), CV_32FCC);
        for(size_t i = 0; i < images.size(); i++) {
            Mat im, w;
            LUT(images[i], weight, w);
            LUT(images[i], response, im);

            // weighted radiance contribution of exposure i
            acc += times.at<float>((int)i) * w.mul(im);
            wsum += times.at<float>((int)i) * times.at<float>((int)i) * w;
        }
        acc = acc.mul(1 / wsum);
        acc.copyTo(result);
    }
void Eigenfaces::predict(const Mat& src, int &minClass, double &minDist) {
	if (_projections.empty()) {
		// throw error if no data (or simply return -1?)
		string error_message = "This cv::Eigenfaces model is not computed yet. Did you call cv::Eigenfaces::train?";
		CV_Error(CV_StsError, error_message);
	}
	else if (_eigenvectors.rows != src.total()) {
		// check data alignment just for clearer exception messages
		string error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total());
		CV_Error(CV_StsError, error_message);
	}
	// project into PCA subspace
	Mat q = project(src.reshape(1, 1));
	// find 1-nearest neighbor
	minDist = DBL_MAX;
	minClass = -1;
	for (int sampleIdx = 0; sampleIdx < _projections.size(); sampleIdx++) {
		double dist = norm(_projections[sampleIdx], q, NORM_L2);
		if ((dist < minDist) && (dist < _threshold)) {
			minDist = dist;
			minClass = _labels[sampleIdx];
		}
	}
}
Example #14
0
// Embedding lookup: for every word index in `bottom_blob`, copies the
// corresponding row of the weight table (length num_output) into `top_blob`,
// optionally adding the bias term. Out-of-range indices are clamped.
// Returns 0 on success, -100 when the output blob could not be allocated.
int Embed::forward(const Mat& bottom_blob, Mat& top_blob) const
{
    const int words = bottom_blob.total();

    top_blob.create(num_output, words);
    if (top_blob.empty())
        return -100;

    // one embedding row per word, rows are independent
    #pragma omp parallel for
    for (int q = 0; q < words; q++)
    {
        // Clamp the index into the valid dictionary range.
        int word_index = ((const int*)bottom_blob)[q];
        if (word_index < 0)
            word_index = 0;
        if (word_index >= input_dim)
            word_index = input_dim - 1;

        // Copy the selected embedding row into the output.
        const float* table_row = (const float*)weight_data + num_output * word_index;
        float* out_row = top_blob.row(q);
        memcpy(out_row, table_row, num_output * sizeof(float));

        if (bias_term)
        {
            for (int p = 0; p < num_output; p++)
                out_row[p] += bias_data[p];
        }
    }

    return 0;
}
Example #15
0
    // Robertson-style camera response calibration: iteratively refines the
    // response curve so the merged radiance best explains the exposure stack.
    // src:    vector of 8-bit exposures of identical size.
    // dst:    receives the LDR_SIZE x 1 response curve.
    // _times: per-image exposure times (read via at<float> -- assumes CV_32F;
    //         TODO confirm against callers).
    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times)
    {
        CV_INSTRUMENT_REGION()

        std::vector<Mat> images;
        src.getMatVector(images);
        Mat times = _times.getMat();

        CV_Assert(images.size() == times.total());
        checkImageDimensions(images);
        CV_Assert(images[0].depth() == CV_8U);

        int channels = images[0].channels();
        int CV_32FCC = CV_MAKETYPE(CV_32F, channels);
        CV_Assert(channels >= 1 && channels <= 3);

        dst.create(LDR_SIZE, 1, CV_32FCC);
        Mat response = dst.getMat();
        // Start from a linear response normalized to the mid-range value.
        response = linearResponse(3) / (LDR_SIZE / 2.0f);

        // card[v][c] counts occurrences of intensity v in channel c across
        // the whole stack; inverted below to average the accumulated sums.
        // NOTE(review): card is created with CV_32FCC (1..3 channels) but
        // addressed via at<Vec3f>, which assumes 3 channels -- confirm that
        // callers only pass 3-channel stacks.
        Mat card = Mat::zeros(LDR_SIZE, 1, CV_32FCC);
        for(size_t i = 0; i < images.size(); i++) {
           uchar *ptr = images[i].ptr();
           for(size_t pos = 0; pos < images[i].total(); pos++) {
               for(int c = 0; c < channels; c++, ptr++) {
                   card.at<Vec3f>(*ptr)[c] += 1;
               }
           }
        }
        card = 1.0 / card;

        Ptr<MergeRobertson> merge = createMergeRobertson();
        for(int iter = 0; iter < max_iter; iter++) {

            // Merge the stack using the current response estimate.
            radiance = Mat::zeros(images[0].size(), CV_32FCC);
            merge->process(images, radiance, times, response);

            // Re-estimate: for each intensity value, average time * radiance
            // over every pixel that had that value.
            Mat new_response = Mat::zeros(LDR_SIZE, 1, CV_32FC3);
            for(size_t i = 0; i < images.size(); i++) {
                uchar *ptr = images[i].ptr();
                float* rad_ptr = radiance.ptr<float>();
                for(size_t pos = 0; pos < images[i].total(); pos++) {
                    for(int c = 0; c < channels; c++, ptr++, rad_ptr++) {
                        new_response.at<Vec3f>(*ptr)[c] += times.at<float>((int)i) * *rad_ptr;
                    }
                }
            }
            new_response = new_response.mul(card);
            // Normalize each channel so the mid-range intensity maps to 1.
            for(int c = 0; c < 3; c++) {
                float middle = new_response.at<Vec3f>(LDR_SIZE / 2)[c];
                for(int i = 0; i < LDR_SIZE; i++) {
                    new_response.at<Vec3f>(i)[c] /= middle;
                }
            }
            // Stop once the response curve change falls below the threshold.
            float diff = static_cast<float>(sum(sum(abs(new_response - response)))[0] / channels);
            new_response.copyTo(response);
            if(diff < threshold) {
                break;
            }
        }
    }
Example #16
0
// Matches the detected markers against the board configuration, estimates the
// board pose (Rvec/Tvec) when camera parameters are available, and returns
// the fraction of board markers that were detected.
// Throws cv::Exception when the board configuration is empty/invalid.
float BoardDetector::detect(const vector< Marker > &detectedMarkers, const BoardConfiguration &BConf, Board &Bdetected, Mat camMatrix, Mat distCoeff,
                            float markerSizeMeters) throw(cv::Exception) {
    if (BConf.size() == 0)
        throw cv::Exception(8881, "BoardDetector::detect", "Invalid BoardConfig that is empty", __FILE__, __LINE__);
    if (BConf[0].size() < 2)
        throw cv::Exception(8881, "BoardDetector::detect", "Invalid BoardConfig that is empty 2", __FILE__, __LINE__);
    // compute the size of the markers in meters, which is used for some routines(mostly drawing)
    // BUG FIX: ssize was previously left uninitialized when neither branch
    // below matched (PIX info with markerSizeMeters <= 0), so garbage was
    // copied into every detected marker. Initialize to a sentinel.
    float ssize = -1;
    if (BConf.mInfoType == BoardConfiguration::PIX && markerSizeMeters > 0)
        ssize = markerSizeMeters;
    else if (BConf.mInfoType == BoardConfiguration::METERS) {
        ssize = cv::norm(BConf[0][0] - BConf[0][1]);
    }

    Bdetected.clear();
    /// find among detected markers these that belong to the board configuration
    for (unsigned int i = 0; i < detectedMarkers.size(); i++) {
        int idx = BConf.getIndexOfMarkerId(detectedMarkers[i].id);
        if (idx != -1) {
            Bdetected.push_back(detectedMarkers[i]);
            Bdetected.back().ssize = ssize;
        }
    }
    // copy configuration
    Bdetected.conf = BConf;

    // Pose estimation needs camera intrinsics plus a usable marker size.
    bool hasEnoughInfoForRTvecCalculation = false;
    if (Bdetected.size() >= 1) {
        if (camMatrix.rows != 0) {
            if (markerSizeMeters > 0 && BConf.mInfoType == BoardConfiguration::PIX)
                hasEnoughInfoForRTvecCalculation = true;
            else if (BConf.mInfoType == BoardConfiguration::METERS)
                hasEnoughInfoForRTvecCalculation = true;
        }
    }

    // calculate extrinsic if there is information for that
    if (hasEnoughInfoForRTvecCalculation) {

        // calculate the size of the markers in meters if expressed in pixels
        double marker_meter_per_pix = 0;
        if (BConf.mInfoType == BoardConfiguration::PIX)
            marker_meter_per_pix = markerSizeMeters / cv::norm(BConf[0][0] - BConf[0][1]);
        else
            marker_meter_per_pix = 1; // to avoid interfering with the process below

        // now, create the 3D<->2D correspondences for pose estimation
        vector< cv::Point3f > objPoints;
        vector< cv::Point2f > imagePoints;
        for (size_t i = 0; i < Bdetected.size(); i++) {
            int idx = Bdetected.conf.getIndexOfMarkerId(Bdetected[i].id);
            assert(idx != -1);
            for (int p = 0; p < 4; p++) {
                imagePoints.push_back(Bdetected[i][p]);
                const aruco::MarkerInfo &Minfo = Bdetected.conf.getMarkerInfo(Bdetected[i].id);
                objPoints.push_back(Minfo[p] * marker_meter_per_pix);
            }
        }
        // assume zero distortion when no coefficients were supplied
        if (distCoeff.total() == 0)
            distCoeff = cv::Mat::zeros(1, 4, CV_32FC1);

        cv::Mat rvec, tvec;
        cv::solvePnP(objPoints, imagePoints, camMatrix, distCoeff, rvec, tvec);
        rvec.convertTo(Bdetected.Rvec, CV_32FC1);
        tvec.convertTo(Bdetected.Tvec, CV_32FC1);

        {
            // Compute (but currently only accumulate) the reprojection error.
            vector< cv::Point2f > reprojected;
            cv::projectPoints(objPoints, rvec, tvec, camMatrix, distCoeff, reprojected);
            double errSum = 0;
            for (size_t i = 0; i < reprojected.size(); i++) {
                errSum += cv::norm(reprojected[i] - imagePoints[i]);
            }
        }
        // now, do a refinement: remove points whose reprojection error is
        // above the threshold and repeat the pose calculation with the rest
        if (repj_err_thres > 0) {
            vector< cv::Point2f > reprojected;
            cv::projectPoints(objPoints, rvec, tvec, camMatrix, distCoeff, reprojected);

            vector< int > pointsThatPassTest; // indices
            for (size_t i = 0; i < reprojected.size(); i++) {
                float err = cv::norm(reprojected[i] - imagePoints[i]);
                if (err < repj_err_thres)
                    pointsThatPassTest.push_back((int)i);
            }
            // copy the surviving correspondences and solve again
            vector< cv::Point3f > objPoints_filtered;
            vector< cv::Point2f > imagePoints_filtered;
            for (size_t i = 0; i < pointsThatPassTest.size(); i++) {
                objPoints_filtered.push_back(objPoints[pointsThatPassTest[i]]);
                imagePoints_filtered.push_back(imagePoints[pointsThatPassTest[i]]);
            }

            cv::solvePnP(objPoints_filtered, imagePoints_filtered, camMatrix, distCoeff, rvec, tvec);
            rvec.convertTo(Bdetected.Rvec, CV_32FC1);
            tvec.convertTo(Bdetected.Tvec, CV_32FC1);
        }


        // now, rotate 90 deg in X so that Y axis points up
        if (_setYPerpendicular)
            rotateXAxis(Bdetected.Rvec);
    }

    // fraction of the board's markers that were actually detected
    float prob = float(Bdetected.size()) / double(Bdetected.conf.size());
    return prob;
}
Example #17
0
// Computes dense SIFT descriptors on a grid (MATLAB sp_find_sift_grid port).
// I:          input image; gridX/gridY: patch top-left coordinates.
// patchSize:  side length of each patch in pixels.
// sigma_edge: sigma of the Gaussian used for the gradient filters.
// Returns a (num_patches x num_samples*num_angles) CV_32F descriptor matrix,
// one row per grid point. Relies on project helpers (create, filter2,
// meshgrid_X/Y, reshapeX/Y, repmat, ...) whose exact semantics are defined
// elsewhere in this project.
cv::Mat SIFT::sp_find_sift_grid( Mat I,Mat gridX,Mat gridY,int patchSize, float sigma_edge )
{
	int num_angles=8;
	float num_bins=4;
	int num_samples=num_bins*num_bins;
	int alpha = 9;

	// (MATLAB port note) the original checked how many arguments were passed
	// and defaulted sigma_edge to 1 when fewer than 5 were given
	float angle_step=2*pi/num_angles;

	// initialize `angles` as a 1-D matrix from 0 to 2*pi, step angle_step
	Mat angles=create(0,2*pi,angle_step);
	angles=deleteO(angles); // drop the last element (2*pi duplicates 0)

	CvSize size=I.size();
	//int hgt=size.height;
	//int wid=size.width;

	int num_patches=gridX.total();// total number of grid points

	Mat sift_arr=Mat::zeros(num_patches,num_samples*num_angles,CV_32F);

	// build the Gaussian-derivative filter kernels
	int  f_wid = 4 * ceil(sigma_edge) + 1;
	Mat G=gaussian(f_wid,sigma_edge);

	Mat GX=gradientX(G);
	Mat GY=gradientY(G);


	GX=GX*2/totalSumO(GX);
	GY=GY*2/totalSumO(GY);



	Mat I_X(I.rows,I.cols,CV_32F);
	I_X=filter2(GX,I);              // I_X depends on how the image was loaded; everything derived from I varies accordingly, but all variants are correct


	Mat I_Y(I.rows,I.cols,CV_32F);
	I_Y=filter2(GY,I);

	// gradient magnitude and orientation per pixel
	Mat T(I_X.rows,I_X.cols,CV_32F);
	add(I_X.mul(I_X),I_Y.mul(I_Y),T);
	Mat I_mag(I_X.rows,I_X.cols,CV_32F);
	sqrt(T,I_mag);
	Mat I_theta=matan2(I_Y,I_X);

	// normalized sample positions inside a patch, in [-1+1/num_bins, 1-1/num_bins]
	Mat interval=create(2/num_bins,2,2/num_bins);
	interval-=(1/num_bins+1);

	Mat sample_x=meshgrid_X(interval,interval);


	Mat sample_y=meshgrid_Y(interval,interval);

	sample_x=reshapeX(sample_x);// flatten to a 1-D matrix

	sample_y=reshapeX(sample_y);

	// one orientation-response image per quantized gradient direction
	Mat I_orientation[8] = {Mat::zeros(size,CV_32F)};
	for(int i=0;i<8;i++)
	{
		I_orientation[i] = Mat::zeros(size,CV_32F);
	}
	float *pt=angles.ptr<float>(0);

	// soft orientation binning: cos(theta - bin_angle)^alpha, clipped at 0,
	// weighted by the gradient magnitude
	for(int a=0;a<num_angles;a++)
	{
		Mat tep1=mcos(I_theta-pt[a]);//cos
		//cout<<tep1.at<float>(0,1)<<endl;
		Mat tep(tep1.rows,tep1.cols,CV_32F);
		pow(tep1,alpha,tep);
		tep=compareB(tep,0);
		I_orientation[a]=tep.mul(I_mag);
	}

	// accumulate one descriptor row per patch
	for(int i=0;i<num_patches;i++)
	{

		double r=patchSize/2;
		// column/row of this grid point inside gridX/gridY
		float l=(float)(i/gridX.rows);
		float m=i%gridX.rows;
		float cx=gridX.at<float>(m,l)+r-0.5;
		float cy=gridY.at<float>(m,l)+r-0.5;

		// sample positions in absolute image coordinates
		Mat sample_x_t=Add(sample_x*r,cx);
		Mat sample_y_t=Add(sample_y*r,cy);
		float *pt1=sample_y_t.ptr<float>(0);
		float sample_res=pt1[1]-pt1[0];

// 		int c=(int)i/gridX.rows;
// 		float *ptc1=gridX.ptr<float>(c);
// 		int x_lo=ptc1[i%gridX.rows];

		// patch pixel bounds
		int x_lo = gridX.at<float>(i % gridX.rows, i / gridX.rows);
		int x_hi=patchSize+x_lo-1;
/*		float *ptc2=gridY.ptr<float>(c);*/


		int y_lo=gridY.at<float>(i % gridY.rows, i / gridY.rows);
		int y_hi=y_lo+patchSize-1;

		Mat A=create(x_lo,x_hi,1);
		Mat B=create(y_lo,y_hi,1);


		Mat sample_px=meshgrid_X(A,B);
		Mat sample_py=meshgrid_Y(A,B);

		int num_pix = sample_px.total();// total number of pixels in the patch
		sample_px=reshapeY(sample_px);
		sample_py=reshapeY(sample_py);

		// distance of every patch pixel to every sample center
		Mat dist_px=abs(repmat(sample_px,1,num_samples)-repmat(sample_x_t,num_pix,1));
		Mat dist_py=abs(repmat(sample_py,1,num_samples)-repmat(sample_y_t,num_pix,1));

		// bilinear weights: linear falloff, zero beyond one sample spacing
		Mat weights_x=dist_px/sample_res;
		Mat weights_x_l=Less(weights_x,1);
		weights_x=(1-weights_x).mul(weights_x_l);

		Mat weights_y=dist_py/sample_res;
		Mat weights_y_l=Less(weights_y,1);
		weights_y=(1-weights_y).mul(weights_y_l);
		Mat weights=weights_x.mul(weights_y);

		// weighted sum of each orientation response over the patch
		Mat curr_sift=Mat::zeros(num_angles,num_samples,CV_32F);
		for(int a=0;a<num_angles;a++)
		{
			//Mat I=getNum(I_orientation[a],y_lo,y_hi,x_lo,x_hi);
			Mat I = I_orientation[a](Range(y_lo, y_hi), Range(x_lo, x_hi));
			Mat tep=reshapeY(I);

			// Fill tep with zeros to fit size of weight
			if (tep.cols < weights.cols)
			{
				for (int i = tep.rows; i < weights.rows; i++)
					tep.push_back(0.0f);
			}

			tep=repmat(tep,1,num_samples);
			Mat t=tep.mul(weights);
			Mat ta=sum_every_col(t);
			float *p=ta.ptr<float>(0);
			for(int i=0;i<curr_sift.cols;i++)
			{
				curr_sift.at<float>(a,i)=p[i];
			}

		}
		// flatten the (angles x samples) block into row i of the result
		Mat tp=reshapeX(curr_sift);
		float *p=tp.ptr<float>(0);
		for(int j=0;j<sift_arr.cols;j++)
		{
			sift_arr.at<float>(i,j)=p[j];
		}

	}

	return sift_arr;
}
// Face photo-sketch recognition experiment driver.
// Extracts descriptors from photo and sketch galleries, learns a bagged
// PCA+LDA projection over 200 random feature subsets, and writes chi-square,
// L2 and cosine distance matrices between testing sketches and photos to
// XML files. Usage (from the loadImages calls): argv[1] = photo directory,
// argv[2] = sketch directory.
int main(int argc, char** argv)
{
	
	string filter = "Gaussian";
	string descriptor = "HAOG";
	string database = "CUFSF";
	uint count = 0;
	
	vector<string> extraPhotos, photos, sketches;
	
	loadImages(argv[1], photos);
	loadImages(argv[2], sketches);
	//loadImages(argv[7], extraPhotos);
	
	uint nPhotos = photos.size(),
	nSketches = sketches.size(),
	nExtra = extraPhotos.size();
	
	// two thirds of the gallery is used for training, the rest for testing
	uint nTraining = 2*nPhotos/3;
	
	cout << "Read " << nSketches << " sketches." << endl;
	cout << "Read " << nPhotos + nExtra << " photos." << endl;
	
	// NOTE(review): descriptors are heap-allocated with `new Mat()` and never
	// deleted -- the process leaks them until exit.
	vector<Mat*> sketchesDescriptors(nSketches), photosDescriptors(nPhotos), extraDescriptors(nExtra);
	
	Mat img, temp;
	
	// descriptor extraction window size and stride
	int size=32, delta=16;
	
	// extract descriptors for every sketch (extraction itself is serialized
	// by the critical section; presumably extractDescriptors is not
	// thread-safe -- confirm before removing)
	#pragma omp parallel for private(img, temp)
	for(uint i=0; i<nSketches; i++){
		img = imread(sketches[i],0);
		sketchesDescriptors[i] = new Mat();
		
		#pragma omp critical
		temp = extractDescriptors(img, size, delta, filter, descriptor);
		
		*(sketchesDescriptors[i]) = temp.clone();
	}
	
	#pragma omp parallel for private(img, temp)
	for(uint i=0; i<nPhotos; i++){
		img = imread(photos[i],0);
		photosDescriptors[i] = new Mat();
		
		#pragma omp critical
		temp = extractDescriptors(img, size, delta, filter, descriptor);
		
		*(photosDescriptors[i]) = temp.clone();
	}
	
	#pragma omp parallel for private(img, temp)
	for(uint i=0; i<nExtra; i++){
		img = imread(extraPhotos[i],0);
		extraDescriptors[i] = new Mat();
		
		#pragma omp critical
		temp = extractDescriptors(img, size, delta, filter, descriptor);
		
		*(extraDescriptors[i]) = temp.clone();
	}
	
	// shuffle sketches and photos with the SAME seed so index i still refers
	// to the same identity in both vectors after shuffling
	auto seed = unsigned(count);
	
	srand(seed);
	random_shuffle(sketchesDescriptors.begin(), sketchesDescriptors.end());
	srand(seed);
	random_shuffle(photosDescriptors.begin(), photosDescriptors.end());
	
	//training
	vector<Mat*> trainingSketchesDescriptors, trainingPhotosDescriptors;
	
	trainingSketchesDescriptors.insert(trainingSketchesDescriptors.end(), sketchesDescriptors.begin(), sketchesDescriptors.begin()+nTraining);
	trainingPhotosDescriptors.insert(trainingPhotosDescriptors.end(), photosDescriptors.begin(), photosDescriptors.begin()+nTraining);
	
	//testing
	vector<Mat*> testingSketchesDescriptors, testingPhotosDescriptors;
	
	testingSketchesDescriptors.insert(testingSketchesDescriptors.end(), sketchesDescriptors.begin()+nTraining, sketchesDescriptors.end());
	testingPhotosDescriptors.insert(testingPhotosDescriptors.end(), photosDescriptors.begin()+nTraining, photosDescriptors.end());
	testingPhotosDescriptors.insert(testingPhotosDescriptors.end(), extraDescriptors.begin(), extraDescriptors.end());
	
	PCA pca;
	LDA lda;
	vector<int> labels;
	
	uint nTestingSketches = testingSketchesDescriptors.size(),
	nTestingPhotos = testingPhotosDescriptors.size();
	
	// one class label per training identity, duplicated because the LDA data
	// matrix below holds the sketch columns followed by the photo columns
	for(uint i=0; i<nTraining; i++){
		labels.push_back(i);
	}
	// NOTE(review): inserting a vector into itself using its own iterators is
	// undefined behavior if the insert reallocates -- safer to copy first.
	labels.insert(labels.end(),labels.begin(),labels.end());
	
	//bags
	vector<Mat*> testingSketchesDescriptorsBag(nTestingSketches), testingPhotosDescriptorsBag(nTestingPhotos);
	
	// 200 bagging rounds; each round projects a random feature subset and
	// appends the projected vectors to the per-image "bag" descriptors
	for(int b=0; b<200; b++){
		
		vector<int> bag_indexes = gen_bag(154, 0.1);
		
		// dimensionality of the bagged feature subset
		uint dim = (bag(*(trainingPhotosDescriptors[0]), bag_indexes, 154)).total();
		
		// data matrix: sketch columns [0, nTraining), photo columns after
		Mat X(dim, 2*nTraining, CV_32F);
		
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTraining; i++){
			temp = *(trainingSketchesDescriptors[i]);
			temp = bag(temp, bag_indexes, 154);
			temp.copyTo(X.col(i));
		}
		
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTraining; i++){
			temp = *(trainingPhotosDescriptors[i]);
			temp = bag(temp, bag_indexes, 154);
			temp.copyTo(X.col(i+nTraining));
		}
		
		Mat Xs = X(Range::all(), Range(0,nTraining));
		Mat Xp = X(Range::all(), Range(nTraining,2*nTraining));
		
		Mat meanX = Mat::zeros(dim, 1, CV_32F), instance;
		Mat meanXs = Mat::zeros(dim, 1, CV_32F);
		Mat meanXp = Mat::zeros(dim, 1, CV_32F);
		
		// calculate sums
		for (int i = 0; i < X.cols; i++) {
			instance = X.col(i);
			add(meanX, instance, meanX);
		}
		
		for (int i = 0; i < Xs.cols; i++) {
			instance = Xs.col(i);
			add(meanXs, instance, meanXs);
		}
		
		for (int i = 0; i < Xp.cols; i++) {
			instance = Xp.col(i);
			add(meanXp, instance, meanXp);
		}
		
		// calculate total mean
		meanX.convertTo(meanX, CV_32F, 1.0/static_cast<double>(X.cols));
		meanXs.convertTo(meanXs, CV_32F, 1.0/static_cast<double>(Xs.cols));
		meanXp.convertTo(meanXp, CV_32F, 1.0/static_cast<double>(Xp.cols));
		
		
		// subtract the mean of matrix (center the columns in place; note Xs
		// and Xp are views into X, so X's columns get centered twice -- once
		// by meanX and once by the per-modality mean)
		for(int i=0; i<X.cols; i++) {
			Mat c_i = X.col(i);
			subtract(c_i, meanX.reshape(1,dim), c_i);
		}
		
		for(int i=0; i<Xs.cols; i++) {
			Mat c_i = Xs.col(i);
			subtract(c_i, meanXs.reshape(1,dim), c_i);
		}
		
		for(int i=0; i<Xp.cols; i++) {
			Mat c_i = Xp.col(i);
			subtract(c_i, meanXp.reshape(1,dim), c_i);
		}
		
		// PCA: keep nTraining-1 components when the dimension allows it,
		// otherwise keep components explaining 99% of the variance
		if(meanX.total() >= nTraining)
			pca(X, Mat(), CV_PCA_DATA_AS_COL, nTraining-1);
		else
			pca.computeVar(X, Mat(), CV_PCA_DATA_AS_COL, .99);
		
		// LDA in the PCA-reduced space; combined projection = PCA then LDA
		Mat W1 = pca.eigenvectors.t();
		Mat ldaData = (W1.t()*X).t();
		lda.compute(ldaData, labels);
		Mat W2 = lda.eigenvectors();
		W2.convertTo(W2, CV_32F);
		Mat projectionMatrix = (W2.t()*W1.t()).t();
		
		//testing: project each testing descriptor and append it to its bag
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTestingSketches; i++){
			temp = *(testingSketchesDescriptors[i]);
			temp = bag(temp, bag_indexes, 154);
			temp = projectionMatrix.t()*(temp-meanX);
			if(b==0){
				testingSketchesDescriptorsBag[i] = new Mat();
				*(testingSketchesDescriptorsBag[i]) = temp.clone();
			}
			else{
				vconcat(*(testingSketchesDescriptorsBag[i]), temp, *(testingSketchesDescriptorsBag[i]));
			}
		}
		
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTestingPhotos; i++){
			temp = *(testingPhotosDescriptors[i]);
			temp = bag(temp, bag_indexes, 154);
			temp = projectionMatrix.t()*(temp-meanX);
			if(b==0){
				testingPhotosDescriptorsBag[i] = new Mat();
				*(testingPhotosDescriptorsBag[i]) = temp.clone();
			}
			else{
				vconcat(*(testingPhotosDescriptorsBag[i]), temp, *(testingPhotosDescriptorsBag[i]));
			}
		}
	}
	
	// pairwise distances between every testing sketch and photo bag
	Mat distancesChi = Mat::zeros(nTestingSketches,nTestingPhotos,CV_64F);
	Mat distancesL2 = Mat::zeros(nTestingSketches,nTestingPhotos,CV_64F);
	Mat distancesCosine = Mat::zeros(nTestingSketches,nTestingPhotos,CV_64F);
	
	#pragma omp parallel for
	for(uint i=0; i<nTestingSketches; i++){
		for(uint j=0; j<nTestingPhotos; j++){
			distancesChi.at<double>(i,j) = chiSquareDistance(*(testingSketchesDescriptorsBag[i]),*(testingPhotosDescriptorsBag[j]));
			distancesL2.at<double>(i,j) = norm(*(testingSketchesDescriptorsBag[i]),*(testingPhotosDescriptorsBag[j]));
			distancesCosine.at<double>(i,j) = abs(1-cosineDistance(*(testingSketchesDescriptorsBag[i]),*(testingPhotosDescriptorsBag[j])));
		}
	}
	
	// persist one distance matrix per metric
	string file1name = "kernel-drs-" + descriptor + database + to_string(nTraining) + string("chi") + to_string(count) + string(".xml");
	string file2name = "kernel-drs-" + descriptor + database + to_string(nTraining) + string("l2") + to_string(count) + string(".xml");
	string file3name = "kernel-drs-" + descriptor + database + to_string(nTraining) + string("cosine") + to_string(count) + string(".xml");
	
	FileStorage file1(file1name, FileStorage::WRITE);
	FileStorage file2(file2name, FileStorage::WRITE);
	FileStorage file3(file3name, FileStorage::WRITE);
	
	file1 << "distanceMatrix" << distancesChi;
	file2 << "distanceMatrix" << distancesL2;
	file3 << "distanceMatrix" << distancesCosine;
	
	file1.release();
	file2.release();
	file3.release();
	
	return 0;
}
// Filters a face picture with 40 Gabor filters, arranging the results into a 3D-array-like
// structure, and runs a 3D-LBP operation on them. Outputs an identification histogram after
// segmentation and concatenation in left-right, top-down order.
// If step is 0 or 1, returns a debug picture in "face". ind = index per (freq*orientation)+rot
void expression_recognizer::gaborLBPHistograms(Mat& face, Mat& hist, Mat& lut, int N, int step, int ind) {
	// Parameters:
	//   face - 8-bit single-channel face image; resized to 64x64 in place.
	//          When step is 0 or 1 it is overwritten with a debug picture.
	//   hist - output: per-cell histograms appended row-wise, then min-max
	//          normalized to [0,1].
	//   lut  - 256-entry 8-bit lookup table remapping LBP codes (applied only
	//          when N > 0; presumably the 59-uniform-pattern mapping -- see
	//          the comment further down; confirm with the caller).
	//   N    - if > 0, remap codes through `lut` and keep only the first N
	//          bins of each cell histogram.
	//   step - debug selector: 0 -> copy a Gabor magnitude plane into `face`,
	//          1 -> copy an LBP plane into `face`.
	//   ind  - plane index used for the debug output: (freq*orientation)+rot.
	// Uses class members declared elsewhere: scale, orientation (Gabor bank
	// dimensions), gaborKernels (frequency-domain kernels -- TODO confirm
	// they are 64x64 two-channel spectra), powtable (LBP bit weights).
	CV_Assert( face.depth() == CV_8U );
	CV_Assert( face.channels() == 1 );

	CV_Assert( lut.depth() == CV_8U );
	CV_Assert( lut.channels() == 1 );
	CV_Assert( lut.total() == 256 );
	CV_Assert( lut.isContinuous() );

	// Work buffers: complex (2-channel) spectrum product and real magnitude.
	Mat tmppic = Mat( Size(64, 64), CV_64FC2);
	Mat holder = Mat( Size(64, 64), CV_64FC1);

	vector<Mat> planes;
	resize(face, face, Size(64,64));
	//face = face.colRange(8,80);//.clone();
	vector<Mat> doubleface;
	// convertTo only changes the depth (the Mat stays 1-channel); the
	// 2-channel complex layout is produced by the merge() below.
	face.convertTo(face, CV_64FC2);

	// Pad to the optimal DFT size (for 64x64 this is likely a no-op since 64
	// is a power of two -- TODO confirm; padding kept for safety).
	int m = getOptimalDFTSize( face.size().height );
	int n = getOptimalDFTSize( face.size().width );
	copyMakeBorder(face, face, 0, m - face.size().height, 0, n - face.size().width, BORDER_CONSTANT, Scalar::all(0));

	// Duplicate the image into both channels before the forward DFT.
	// NOTE(review): a zero imaginary channel would be the usual choice;
	// duplicating the real plane looks deliberate here but is unusual --
	// confirm against the training-side preprocessing.
	doubleface.clear();
	doubleface.push_back(face);
	doubleface.push_back(face);
	merge( doubleface, face );

	dft(face, face, DFT_COMPLEX_OUTPUT + DFT_SCALE, 0);

	// Filter with every Gabor kernel in the frequency domain and keep the
	// spatial magnitude responses: the "Gabor cube" of scale*orientation
	// planes. (binaryGaborVolume is declared but never used.)
	vector<Mat> gaborCube(scale*orientation);
	vector<Mat> binaryGaborVolume;
	for (unsigned int freq=0;freq<scale;freq++) {
		for (unsigned int rot=0;rot<orientation;rot++) {
			unsigned int index = (freq*orientation)+rot;

			Mat tmp = gaborKernels[index];
			mulSpectrums(face, tmp, tmppic, 0, false);
			idft(tmppic, tmppic, DFT_SCALE, 0);

			planes.clear();
			split(tmppic, planes);
			Mat p0=planes[0];
			Mat p1=planes[1];
			magnitude(p0,p1,holder);
			//holder = holder.colRange(0, 64).rowRange(0,64);
			// From real and imaginary parts we can get the magnitude for identification
			// add 1px borders for later, store in gabor-cube
			// (the border lets the 3D-LBP below read y+-1 / x+-1 neighbors
			// without bounds checks)

			copyMakeBorder(holder, holder,1, 1, 1, 1, BORDER_CONSTANT, Scalar::all(0));
			gaborCube[index] = holder.clone();
		}
	}

	if (step == 0) face = gaborCube[ind];

	// 3D-LBP: each pixel is compared against its 4 spatial neighbors plus the
	// same pixel in the adjacent orientation and scale planes; each
	// comparison sets one bit, with bit weights taken from powtable.
	vector<Mat> LBP;
	Mat lbp = Mat(64,64,CV_8U);

	for (unsigned int freq=0;freq<scale;freq++) {
		for (unsigned int rot=0;rot<orientation;rot++) {

			unsigned int index = rot+(freq*orientation);
			Mat thiz = gaborCube[index];
			uchar pix = 0;

			// iterate the interior only; the 1px border added above supplies
			// the y+-1 / x+-1 reads
			for (unsigned int y=1;y<thiz.size().height-1;y++) {
				for (unsigned int x=1;x<thiz.size().width-1;x++) {
					pix = 0;
					double center = thiz.at<double>(y,x);

					// indices 1,3,5 and 7 are normal closest neighbor LBP
					if (thiz.at<double>(y-1,x) >= center ) pix += powtable[1];
					if (thiz.at<double>(y,x+1) >= center ) pix += powtable[3];
					if (thiz.at<double>(y+1,x) >= center ) pix += powtable[5];
					if (thiz.at<double>(y,x-1) >= center ) pix += powtable[7];

					// orientation neighbors are indices 2 and 6
					if (rot > 0) {
						Mat back = gaborCube[index-1];
						if ( back.at<double>(y,x) >= center ) pix += powtable[2];
					}
					if (rot < orientation-1) {
						Mat front = gaborCube[index+1];
						if ( front.at<double>(y,x) >= center ) pix += powtable[6];
					}
					//scale neighbors, indices 0,4
					if (freq > 0 ) {
						Mat back = gaborCube[index-orientation];
						if ( back.at<double>(y,x) >= center) pix += powtable[0];
					}

					if (freq < scale-1) {
						Mat front = gaborCube[index+orientation];
						if ( front.at<double>(y,x) >= center) pix += powtable[4];
					}

					// lbp is 64x64 while thiz is 66x66 (bordered), hence -1
					lbp.at<uchar>(y-1,x-1) = pix;
				}
			}

			// 59 uniform patterns
			if (N>0) LUT(lbp, lut, lbp);

			LBP.push_back(lbp.clone());
		}
	}

	if (step == 1) face = LBP[ind];

	// histogram setup; `channels` is unused because calcHist below gets a
	// null channel list (single-channel image)
	int histSize[] = {256};
	float range[] = {0, 256};
	const float* histRange[] = {range};
	int channels[]={0};

	// Per-cell weights for the 8x8 grid of 8x8-pixel cells.
	// NOTE(review): `weight` is only compared against zero below -- cells
	// with weight 0 are skipped, but the 2/3/4 values never actually scale
	// the histograms. Confirm whether true weighting was intended.
	static double areaWeights[] =		{  1,1,1,1,1,1,1,1,
								   	   	   1,1,1,1,1,1,1,1,
										   1,4,4,3,3,4,4,1,
										   1,4,4,3,3,4,4,1,
										   0,1,1,1,1,1,1,0,
										   0,1,2,2,2,2,1,0,
										   0,1,2,2,2,2,1,0,
										   0,0,1,1,1,1,0,0 };


	static unsigned int xstep=8, ystep=8;
	static unsigned int xsize=8, ysize=8;

	// For every non-skipped cell: accumulate the 256-bin histogram over all
	// scale*orientation LBP planes, optionally truncate to N bins, and append
	// to the output feature vector.
	for (unsigned int y = 0;y<ystep;y++) {
		for (unsigned int x = 0;x<xstep;x++) {
			Mat accuhist = Mat::zeros(256,1,CV_32F);
			unsigned int weight = areaWeights[x+(y*xsize)];

			if (weight != 0) {
				for (unsigned int i=0;i<scale*orientation;i++) {
					Mat tempHist = Mat::zeros(256,1,CV_32F);
					lbp = LBP[i];

					Mat roi = lbp.rowRange(y*ysize, (y+1)*ysize).colRange(x*xsize,(x+1)*xsize);
					calcHist(&roi, 1, 0, Mat(), tempHist, 1, histSize, histRange, true, false );
					scaleAdd(tempHist, 1, accuhist, accuhist);
				}

				if (N>0) accuhist = accuhist.rowRange(0, N);
				// cut from 256 values per 8x8 area to 8 values per area
				//dump( accuhist );
				 hist.push_back(accuhist.clone());
				//cuts the ID vector length even more
			}
		}
	}

	// scale the concatenated feature vector to [0,1]
	normalize( hist, hist, 0, 1, NORM_MINMAX, -1, noArray());
}
Example #20
0
int main(int argc, char **argv)
{
    CommandLineParser parser(argc, argv, keys);

    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    String modelFile = parser.get<String>("model");
    String imageFile = parser.get<String>("image");

    if (!parser.check())
    {
        parser.printErrors();
        return 0;
    }

    String classNamesFile = parser.get<String>("c_names");
    String resultFile = parser.get<String>("result");

    //! [Read model and initialize network]
    dnn::Net net = dnn::readNetFromTorch(modelFile);

    //! [Prepare blob]
    Mat img = imread(imageFile), input;
    if (img.empty())
    {
        std::cerr << "Can't read image from the file: " << imageFile << std::endl;
        exit(-1);
    }

    Size origSize = img.size();
    Size inputImgSize = cv::Size(1024, 512);

    if (inputImgSize != origSize)
        resize(img, img, inputImgSize);       //Resize image to input size

    Mat inputBlob = blobFromImage(img, 1./255);   //Convert Mat to image batch
    //! [Prepare blob]

    //! [Set input blob]
    net.setInput(inputBlob, "");        //set the network input
    //! [Set input blob]

    TickMeter tm;

    String oBlob = net.getLayerNames().back();
    if (!parser.get<String>("o_blob").empty())
    {
        oBlob = parser.get<String>("o_blob");
    }

    //! [Make forward pass]
    tm.start();
    Mat result = net.forward(oBlob);
    tm.stop();

    if (!resultFile.empty()) {
        CV_Assert(result.isContinuous());

        ofstream fout(resultFile.c_str(), ios::out | ios::binary);
        fout.write((char*)result.data, result.total() * sizeof(float));
        fout.close();
    }

    std::cout << "Output blob: " << result.size[0] << " x " << result.size[1] << " x " << result.size[2] << " x " << result.size[3] << "\n";
    std::cout << "Inference time, ms: " << tm.getTimeMilli()  << std::endl;

    if (parser.has("show"))
    {
        std::vector<String> classNames;
        vector<cv::Vec3b> colors;
        if(!classNamesFile.empty()) {
            colors = readColors(classNamesFile, classNames);
        }
        Mat segm, legend;
        colorizeSegmentation(result, segm, legend, classNames, colors);

        Mat show;
        addWeighted(img, 0.1, segm, 0.9, 0.0, show);

        cv::resize(show, show, origSize, 0, 0, cv::INTER_NEAREST);
        imshow("Result", show);
        if(classNames.size())
            imshow("Legend", legend);
        waitKey();
    }

    return 0;
} //main
Example #21
0
void detect_and_draw( IplImage* img ,IplImage* img_old,int minX,int minY)
{
	// Detects faces in `img` with the global Haar cascade, draws a circle for
	// each hit on `img_old` (coordinates offset by minX/minY -- presumably
	// undoing a caller-side ROI crop, TODO confirm), and uploads every face
	// ROI to a remote server over a fresh TCP connection per face.
	// Relies on file-scope globals declared elsewhere: cascade, storage,
	// sockfd, portno, serv_addr, n, buffer, bufferImage, serverWelcome,
	// serverBye.
	static CvScalar colors = CV_RGB( 255, 0, 0);
	printf("detect=++++++++++++++++++++++++++++++==\n");
	double scale = 1.3;
	IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
	IplImage* small_img = cvCreateImage( cvSize( cvRound (img->width/scale),cvRound (img->height/scale)),8, 1 );

	cvCvtColor( img, gray, CV_BGR2GRAY );
	double t = (double)cvGetTickCount();
	cvResize( gray, small_img, CV_INTER_LINEAR );
	t = (double)cvGetTickCount() - t;
	printf( "cvResize time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
	t = (double)cvGetTickCount();
	cvEqualizeHist( small_img, small_img );
	t = (double)cvGetTickCount() - t;
	printf( "cvEqualizeHist time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
	printf("detect=++++++++++++++++++++++++++++++==0.1\n");
	cvClearMemStorage( storage );
	if( cascade )
	{
		int i;
		t = (double)cvGetTickCount();
		CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,1.2, 2, CV_HAAR_DO_CANNY_PRUNING,cvSize(30, 30) );
		t = (double)cvGetTickCount() - t;
		printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );

		for( i = 0; i < (faces ? faces->total : 0); i++ )
		{
			CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
			CvPoint center;
			int radius;
			// Map the detection back to full-image coordinates (undo the
			// 1/scale downsampling and the caller's ROI offset).
			center.x = cvRound((r->x + r->width*0.5)*scale + minX);
			center.y = cvRound((r->y + r->height*0.5)*scale + minY);
			radius = cvRound((r->width + r->height)*0.25*scale);
			cvCircle( img_old, center, radius, colors, 3, 8, 0 );
			printf("heloo------------------------------------people\n");

			// Crop the face and flatten it to a single row for transmission.
			Mat ROI_mat = Mat(img);
			Mat faceImg = ROI_mat(*r).clone();

			// BUGFIX: Mat::total() returns size_t; passing it to %d is
			// undefined behavior on LP64 -- cast explicitly.
			// NOTE(review): snprintf would be safer, but bufferImage's size
			// is declared elsewhere.
			sprintf(bufferImage,"rows=%d , cols=%d , image.total()=%d \n",faceImg.rows,faceImg.cols,(int)faceImg.total());
			faceImg = (faceImg.reshape(0,1));

			portno = 9001;
			sockfd = socket(AF_INET, SOCK_STREAM, 0);
			if (sockfd < 0)
				printf("ERROR opening socket\n");

			memset(&serv_addr, 0, sizeof(serv_addr));
			serv_addr.sin_family = AF_INET;
			serv_addr.sin_addr.s_addr = inet_addr("192.168.1.40");
			serv_addr.sin_port = htons(portno);
			if (connect(sockfd,(struct sockaddr *) &serv_addr,sizeof(serv_addr)) < 0)
				perror("ERROR connecting1\n");

			printf("socket fd=%d\n",sockfd);

			// Handshake: expect the server's welcome banner before sending.
			// buffer is zeroed first so the 8-byte recv stays NUL-terminated.
			bzero(buffer,32);
			n = recv(sockfd,buffer,8,0);//server welcome
			printf("recv n =%d\n",n);
			if(0 == strcmp(serverWelcome,buffer))
			{
				printf("recv server hello\n");
				// BUGFIX: printf(buffer) interpreted received bytes as a
				// format string (uncontrolled-format-string vulnerability).
				printf("%s", buffer);
				bzero(buffer,32);
			}else{
				close(sockfd);
				break;
			}

			printf("rows =%d , cols = %d ,image.total() = %d \n",faceImg.rows,faceImg.cols,(int)faceImg.total());
			// send the textual header, then the raw pixel payload
			n = send(sockfd,bufferImage,strlen(bufferImage),0);
			printf("send n =%d\n",n);
			if (n < 0)
				perror("ERROR writing to socket");

			n = send(sockfd,faceImg.data,faceImg.total()*faceImg.elemSize(),0);
			printf("send n =%d\n",n);
			if (n < 0)
				perror("ERROR writing to socket");

			// Expect the server's goodbye before closing cleanly.
			n = recv(sockfd,buffer,4,0);
			if(0 == strcmp(serverBye,buffer))
			{
				printf("recv server bye\n");
				// BUGFIX: same format-string fix as above.
				printf("%s", buffer);
			}else{
				close(sockfd);
				break;
			}
			close(sockfd);
		}
		printf("detect=++++++++++++++++++++++++++++++1\n");
	}
	printf("==============================================\n");
	cvSaveImage("/tmp_detection.bmp",img_old);//save the annotated result
	cvReleaseImage( &gray );
	cvReleaseImage( &small_img );
}
Example #22
0
//------------------------------------------------------------------------------
// Fisherfaces
//------------------------------------------------------------------------------
void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
    // Trains the Fisherfaces model: PCA down to (N-C) dimensions followed by
    // LDA, storing the combined projection matrix (_eigenvectors), the total
    // mean, the labels and the per-sample projections used by predict().
    //
    // src   - set of equally-sized training images
    // _lbls - CV_32SC1 row or column vector, one label per sample
    // Throws cv::Exception (StsBadArg / StsUnsupportedFormat) on bad input.
    if(src.total() == 0) {
        String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(Error::StsBadArg, error_message);
    } else if(_lbls.getMat().type() != CV_32SC1) {
        String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _lbls.type());
        CV_Error(Error::StsBadArg, error_message);
    }
    // make sure all samples have the same pixel count
    if(src.total() > 1) {
        for(int i = 1; i < static_cast<int>(src.total()); i++) {
            if(src.getMat(i-1).total() != src.getMat(i).total()) {
                // cast the size_t values so the arguments match the %d conversions
                String error_message = format("In the Fisherfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", static_cast<int>(src.getMat(i-1).total()), static_cast<int>(src.getMat(i).total()));
                CV_Error(Error::StsUnsupportedFormat, error_message);
            }
        }
    }
    // get data
    Mat labels = _lbls.getMat();
    Mat data = asRowMatrix(src, CV_64FC1);
    // number of samples
    int N = data.rows;
    // make sure labels are passed in correct shape
    if(labels.total() != (size_t) N) {
        String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", N, static_cast<int>(labels.total()));
        CV_Error(Error::StsBadArg, error_message);
    } else if(labels.rows != 1 && labels.cols != 1) {
        // BUGFIX: this message used "rows=%s" with an int argument, which is
        // undefined behavior (a string conversion reading an int as a
        // pointer); it must be %d.
        String error_message = format("Expected the labels in a matrix with one row or column! Given dimensions are rows=%d, cols=%d.", labels.rows, labels.cols);
        CV_Error(Error::StsBadArg, error_message);
    }
    // clear existing model data
    _labels.release();
    _projections.clear();
    // safely copy from cv::Mat to std::vector
    std::vector<int> ll;
    for(unsigned int i = 0; i < labels.total(); i++) {
        ll.push_back(labels.at<int>(i));
    }
    // get the number of unique classes
    int C = (int) remove_dups(ll).size();
    // clip number of components to a valid range: LDA yields at most C-1 discriminants
    if((_num_components <= 0) || (_num_components > (C-1)))
        _num_components = (C-1);
    // perform a PCA and keep (N-C) components
    PCA pca(data, Mat(), PCA::DATA_AS_ROW, (N-C));
    // project the data and perform a LDA on it
    LDA lda(pca.project(data),labels, _num_components);
    // store the total mean vector
    _mean = pca.mean.reshape(1,1);
    // store labels
    _labels = labels.clone();
    // store the eigenvalues of the discriminants
    lda.eigenvalues().convertTo(_eigenvalues, CV_64FC1);
    // Now calculate the projection matrix as pca.eigenvectors * lda.eigenvectors.
    // Note: OpenCV stores the eigenvectors by row, so we need to transpose it!
    gemm(pca.eigenvectors, lda.eigenvectors(), 1.0, Mat(), 0.0, _eigenvectors, GEMM_1_T);
    // store the projections of the original data
    for(int sampleIdx = 0; sampleIdx < data.rows; sampleIdx++) {
        Mat p = LDA::subspaceProject(_eigenvectors, _mean, data.row(sampleIdx));
        _projections.push_back(p);
    }
}
Example #23
0
    // Recovers the camera response curve from a stack of differently exposed
    // 8-bit images (Debevec-Malik-style calibration -- TODO confirm against
    // the reference implementation): sample pixel positions, build one
    // weighted least-squares system per channel (data-fitting rows plus
    // smoothness rows scaled by lambda), solve with SVD, and exponentiate the
    // log-response into dst.
    //
    // src    - vector of same-sized CV_8U images
    // dst    - receives an LDR_SIZE x 1 CV_32F curve, one channel per input channel
    // _times - exposure times, CV_32FC1, one entry per image
    // Members used (declared elsewhere, values not visible here): samples,
    // random, lambda, w (per-intensity weight lookup), LDR_SIZE.
    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times)
    {
        CV_INSTRUMENT_REGION()

        // check inputs
        std::vector<Mat> images;
        src.getMatVector(images);
        Mat times = _times.getMat();

        CV_Assert(images.size() == times.total());
        checkImageDimensions(images);
        CV_Assert(images[0].depth() == CV_8U);
        CV_Assert(times.type() == CV_32FC1);

        // create output
        int channels = images[0].channels();
        int CV_32FCC = CV_MAKETYPE(CV_32F, channels);
        int rows = images[0].rows;
        int cols = images[0].cols;

        dst.create(LDR_SIZE, 1, CV_32FCC);
        Mat result = dst.getMat();

        // pick pixel locations (either random or in a rectangular grid)
        std::vector<Point> points;
        points.reserve(samples);
        if(random) {
            for(int i = 0; i < samples; i++) {
                points.push_back(Point(rand() % cols, rand() % rows));
            }
        } else {
            // grid spacing chosen so x_points*y_points approximates `samples`
            // while keeping the grid aspect ratio close to the image's
            int x_points = static_cast<int>(sqrt(static_cast<double>(samples) * cols / rows));
            CV_Assert(0 < x_points && x_points <= cols);
            int y_points = samples / x_points;
            CV_Assert(0 < y_points && y_points <= rows);
            int step_x = cols / x_points;
            int step_y = rows / y_points;
            // start half a step in so the grid is centered in the image
            for(int i = 0, x = step_x / 2; i < x_points; i++, x += step_x) {
                for(int j = 0, y = step_y / 2; j < y_points; j++, y += step_y) {
                    if( 0 <= x && x < cols && 0 <= y && y < rows ) {
                        points.push_back(Point(x, y));
                    }
                }
            }
            // we can have slightly less grid points than specified
            //samples = static_cast<int>(points.size());
        }

        // we need enough equations to ensure a sufficiently overdetermined system
        // (maybe only as a warning)
        //CV_Assert(points.size() * (images.size() - 1) >= LDR_SIZE);

        // solve for imaging system response function, over each channel separately
        std::vector<Mat> result_split(channels);
        for(int ch = 0; ch < channels; ch++) {
            // initialize system of linear equations:
            // rows = one per (point, image) pair + 1 anchor + (LDR_SIZE-2) smoothness
            // cols = LDR_SIZE response unknowns + one log-irradiance unknown per point
            Mat A = Mat::zeros((int)points.size() * (int)images.size() + LDR_SIZE + 1,
                LDR_SIZE + (int)points.size(), CV_32F);
            Mat B = Mat::zeros(A.rows, 1, CV_32F);

            // include the data-fitting equations:
            // w(val) * (g(val) - lnE_i) = w(val) * ln(t_j)
            int k = 0;
            for(size_t i = 0; i < points.size(); i++) {
                for(size_t j = 0; j < images.size(); j++) {
                    // val = images[j].at<Vec3b>(points[i].y, points[i].x)[ch]
                    int val = images[j].ptr()[channels*(points[i].y * cols + points[i].x) + ch];
                    float wij = w.at<float>(val);
                    A.at<float>(k, val) = wij;
                    A.at<float>(k, LDR_SIZE + (int)i) = -wij;
                    B.at<float>(k, 0) = wij * log(times.at<float>((int)j));
                    k++;
                }
            }

            // fix the curve by setting its middle value to 0
            A.at<float>(k, LDR_SIZE / 2) = 1;
            k++;

            // include the smoothness equations: lambda * w * g''(i) ~ 0
            // (second finite difference over adjacent response values)
            for(int i = 0; i < (LDR_SIZE - 2); i++) {
                float wi = w.at<float>(i + 1);
                A.at<float>(k, i) = lambda * wi;
                A.at<float>(k, i + 1) = -2 * lambda * wi;
                A.at<float>(k, i + 2) = lambda * wi;
                k++;
            }

            // solve the overdetermined system using SVD (least-squares problem)
            Mat solution;
            solve(A, B, solution, DECOMP_SVD);
            // the first LDR_SIZE entries are the log-response; the remaining
            // per-point log irradiances are discarded
            solution.rowRange(0, LDR_SIZE).copyTo(result_split[ch]);
        }

        // combine log-exposures and take its exponent
        merge(result_split, result);
        exp(result, result);
    }
int main(int argc, char *argv[]){
  // ChArUco-board detection demo: renders the generated board in one window
  // and, for each camera frame, detects ArUco markers, interpolates the
  // chessboard corners and draws the estimated board pose.

  ros::init(argc,argv,"aruco_test");
  ros::NodeHandle nh;
  int squares_x = 3;
  int squares_y = 3;

  float square_length = 2.0;
  float marker_length = 1.5;
  int dictionaryId = 1;

  namedWindow("detection_result");
  startWindowThread();
  namedWindow("board");
  startWindowThread();

  Ptr<aruco::Dictionary> dictionary = 
    aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(dictionaryId));

  VideoCapture input;
  input.open(0);
  if(!input.isOpened())
    return -1;  // camera unavailable; nothing to process

  Ptr<aruco::CharucoBoard> charucoboard = aruco::CharucoBoard::create(squares_x,squares_y,square_length, marker_length, dictionary);
  Mat boardImg;
  charucoboard->draw(cv::Size(1000,1000),boardImg,10,1);
  imshow("board",boardImg);
  Ptr<aruco::Board> board = charucoboard.staticCast<aruco::Board>();

  // Hard-coded intrinsics/distortion for the demo camera.
  Mat distCoeffs = (Mat_<float>(1,5) << 0.182276,-0.533582,0.000520,-0.001682,0.000000);
  Mat camMatrix  = (Mat_<float>(3,3) << 
		    743.023418,0.000000,329.117496,
		    0.000000,745.126083,235.748102,
		    0.000000,0.000000,1.000000);

  // Detector parameters never change, so create them once instead of
  // allocating a new instance on every frame.
  Ptr<aruco::DetectorParameters> detectorParams = aruco::DetectorParameters::create();

  while(input.grab() && ros::ok()){
    Mat image;
    input.retrieve(image);
  
    vector<int> markerIds,charucoIds;
    vector<vector<Point2f> > markerCorners, rejectedMarkers;
    vector<Point2f> charucoCorners;
    Vec3d rvec, tvec;

    aruco::detectMarkers(image, dictionary, markerCorners, markerIds, detectorParams,rejectedMarkers);

    int interpolatedCorners = 0;
    if(markerIds.size() > 0)
      interpolatedCorners =
	aruco::interpolateCornersCharuco(markerCorners, markerIds, image, 
					 charucoboard, charucoCorners, charucoIds, camMatrix, distCoeffs);

    bool validPose = false;

    // Pose estimation needs the intrinsics and at least 4 interpolated
    // corners; skip it entirely on frames without enough detections.
    if(camMatrix.total()!=0 && interpolatedCorners >= 4)
      validPose = aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds,charucoboard, camMatrix, distCoeffs, rvec, tvec);

    Mat result;
    image.copyTo(result);
  
    if(markerIds.size() > 0)
      aruco::drawDetectedMarkers(result, markerCorners);

    if(interpolatedCorners > 0){
      Scalar color(255,0,0);
      aruco::drawDetectedCornersCharuco(result, charucoCorners, charucoIds, color);
    }

    if(validPose)
      aruco::drawAxis(result, camMatrix, distCoeffs, rvec, tvec, 1.0f);
  
    imshow("detection_result",result);
  } 
  return 0;
}
Example #25
0
void ONNXImporter::populateNet(Net dstNet)
{
    CV_Assert(model_proto.has_graph());
    opencv_onnx::GraphProto graph_proto = model_proto.graph();
    std::map<std::string, Mat> constBlobs = getGraphTensors(graph_proto);
    // List of internal blobs shapes.
    std::map<std::string, MatShape> outShapes;
    // Add all the inputs shapes. It includes as constant blobs as network's inputs shapes.
    for (int i = 0; i < graph_proto.input_size(); ++i)
    {
        opencv_onnx::ValueInfoProto valueInfoProto = graph_proto.input(i);
        CV_Assert(valueInfoProto.has_type());
        opencv_onnx::TypeProto typeProto = valueInfoProto.type();
        CV_Assert(typeProto.has_tensor_type());
        opencv_onnx::TypeProto::Tensor tensor = typeProto.tensor_type();
        CV_Assert(tensor.has_shape());
        opencv_onnx::TensorShapeProto tensorShape = tensor.shape();

        MatShape inpShape(tensorShape.dim_size());
        for (int j = 0; j < inpShape.size(); ++j)
        {
            inpShape[j] = tensorShape.dim(j).dim_value();
        }
        outShapes[valueInfoProto.name()] = inpShape;
    }

    std::string framework_name;
    if (model_proto.has_producer_name()) {
        framework_name = model_proto.producer_name();
    }

    // create map with network inputs (without const blobs)
    std::map<std::string, LayerInfo> layer_id;
    std::map<std::string, LayerInfo>::iterator layerId;
    std::map<std::string, MatShape>::iterator shapeIt;
    // fill map: push layer name, layer id and output id
    std::vector<String> netInputs;
    for (int j = 0; j < graph_proto.input_size(); j++)
    {
        const std::string& name = graph_proto.input(j).name();
        if (constBlobs.find(name) == constBlobs.end()) {
            netInputs.push_back(name);
            layer_id.insert(std::make_pair(name, LayerInfo(0, netInputs.size() - 1)));
        }
    }
    dstNet.setInputsNames(netInputs);

    int layersSize = graph_proto.node_size();
    LayerParams layerParams;
    opencv_onnx::NodeProto node_proto;

    for(int li = 0; li < layersSize; li++)
    {
        node_proto = graph_proto.node(li);
        layerParams = getLayerParams(node_proto);
        CV_Assert(node_proto.output_size() >= 1);
        layerParams.name = node_proto.output(0);

        std::string layer_type = node_proto.op_type();
        layerParams.type = layer_type;


        if (layer_type == "MaxPool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "MAX");
            layerParams.set("ceil_mode", isCeilMode(layerParams));
        }
        else if (layer_type == "AveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("ceil_mode", isCeilMode(layerParams));
            layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
        }
        else if (layer_type == "GlobalAveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("global_pooling", true);
        }
        else if (layer_type == "Add" || layer_type == "Sum")
        {
            if (layer_id.find(node_proto.input(1)) == layer_id.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    layerParams.type = "Power";
                    layerParams.set("shift", blob.at<float>(0));
                }
                else {
                    layerParams.type = "Scale";
                    layerParams.set("bias_term", true);
                    layerParams.blobs.push_back(blob);
                }
            }
            else {
                layerParams.type = "Eltwise";
            }
        }
        else if (layer_type == "Sub")
        {
            Mat blob = getBlob(node_proto, constBlobs, 1);
            if (blob.total() == 1) {
                layerParams.type = "Power";
                layerParams.set("shift", -blob.at<float>(0));
            }
            else {
                layerParams.type = "Scale";
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(-1.0f * blob.reshape(1, 1));
            }
        }
        else if (layer_type == "Div")
        {
            Mat blob = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(blob.type() == CV_32F, blob.total());
            if (blob.total() == 1)
            {
                layerParams.set("scale", 1.0f / blob.at<float>(0));
                layerParams.type = "Power";
            }
            else
            {
                layerParams.type = "Scale";
                divide(1.0, blob, blob);
                layerParams.blobs.push_back(blob);
                layerParams.set("bias_term", false);
            }
        }
        else if (layer_type == "Constant")
        {
            CV_Assert(node_proto.input_size() == 0);
            CV_Assert(layerParams.blobs.size() == 1);
            constBlobs.insert(std::make_pair(layerParams.name, layerParams.blobs[0]));
            continue;
        }
        else if (layer_type == "ImageScaler")
        {
            const float scale = layerParams.has("scale") ? layerParams.get<float>("scale") : 1.0f;
            layerParams.erase("scale");

            if (layerParams.has("bias"))
            {
                layerParams.type = "Scale";
                layerParams.blobs.push_back(
                    Mat(Size(1,  layerParams.get("bias").size()), CV_32FC1, scale));

                layerParams.set("bias_term", true);
                Mat bias(1, layerParams.get("bias").size(), CV_32FC1);
                for (int j = 0; j < bias.total(); j++) {
                    bias.at<float>(0, j) = layerParams.get("bias").getRealValue(j);
                }
                layerParams.blobs.push_back(bias);
                layerParams.erase("bias");
            }
            else {
                layerParams.set("scale", scale);
                layerParams.type = "Power";
            }
        }
        else if (layer_type == "LeakyRelu")
        {
            layerParams.type = "ReLU";
            replaceLayerParam(layerParams, "alpha", "negative_slope");
        }
        else if (layer_type == "LRN")
        {
            replaceLayerParam(layerParams, "size", "local_size");
        }
        else if (layer_type == "BatchNormalization")
        {
            if (node_proto.input_size() != 5)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias, mean and var");

            layerParams.type = "BatchNorm";
            replaceLayerParam(layerParams, "epsilon", "eps");
            replaceLayerParam(layerParams, "spatial", "use_global_stats");

            Mat meanData = getBlob(node_proto, constBlobs, 3);
            Mat stdData =  getBlob(node_proto, constBlobs, 4);

            layerParams.blobs.push_back(meanData);
            layerParams.blobs.push_back(stdData);

            if (!node_proto.input(1).empty()) {
                layerParams.set("has_weight", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1));  // weightData
            } else {
                layerParams.set("has_weight", false);
            }

            if (!node_proto.input(2).empty()) {
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 2)); // biasData
            } else {
                layerParams.set("has_bias", false);
            }
        }
        else if (layer_type == "Gemm")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "InnerProduct";
            Mat weights = getBlob(node_proto, constBlobs, 1);
            int ind_num_out = 0;
            if (layerParams.has("transB") && !layerParams.get<int>("transB")) {
                transpose(weights, weights);
                ind_num_out = 1;
            }
            layerParams.blobs.push_back(weights);

            if (node_proto.input_size() == 3) {
                Mat bias = getBlob(node_proto, constBlobs, 2);
                layerParams.blobs.push_back(bias);
            }

            layerParams.set("num_output", layerParams.blobs[0].size[ind_num_out]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "MatMul")
        {
            CV_Assert(node_proto.input_size() == 2);
            layerParams.type = "InnerProduct";
            Mat blob = getBlob(node_proto, constBlobs, 1);
            layerParams.blobs.push_back(blob.t());
            layerParams.set("bias_term", false);
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
        }
        else if (layer_type == "Mul")
        {
            CV_Assert(node_proto.input_size() == 2);
            if (layer_id.find(node_proto.input(1)) == layer_id.end()) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    layerParams.set("scale", blob.at<float>(0));
                    layerParams.type = "Power";
                }
                else {
                    layerParams.blobs.push_back(blob);
                    layerParams.type = "Scale";
                }
            }
            else {
                layerParams.type = "Eltwise";
                layerParams.set("operation", "prod");
            }
        }
        else if (layer_type == "Conv")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Convolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "ConvTranspose")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Deconvolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[1]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "Transpose")
        {
            layerParams.type = "Permute";
            replaceLayerParam(layerParams, "perm", "order");
        }
        else if (layer_type == "Unsqueeze")
        {
            CV_Assert(node_proto.input_size() == 1);
            Mat input = getBlob(node_proto, constBlobs, 0);

            DictValue axes = layerParams.get("axes");
            std::vector<int> dims;
            for (int j = 0; j < input.dims; j++) {
                dims.push_back(input.size[j]);
            }
            CV_Assert(axes.getIntValue(axes.size()-1) <= dims.size());
            for (int j = 0; j < axes.size(); j++) {
                dims.insert(dims.begin() + axes.getIntValue(j), 1);
            }

            Mat out = input.reshape(0, dims);
            constBlobs.insert(std::make_pair(layerParams.name, out));
            continue;
        }
        else if (layer_type == "Reshape")
        {
            CV_Assert(node_proto.input_size() == 2 || layerParams.has("shape"));

            if (node_proto.input_size() == 2) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob.type() == CV_32SC1);

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, static_cast<std::vector<int> >(blob));
                    constBlobs.insert(std::make_pair(layerParams.name, out));
                    continue;
                }
                layerParams.set("dim", DictValue::arrayInt<int*>(
                            blob.ptr<int>(), blob.total() ));
            }
            else {
                DictValue shape = layerParams.get("shape");
                std::vector<int> dim;
                for (int j = 0; j < shape.size(); j++) {
                    dim.push_back(shape.getIntValue(j));
                }

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, dim);
                    constBlobs.insert(std::make_pair(layerParams.name, out));
                    continue;
                }
                replaceLayerParam(layerParams, "shape", "dim");
            }
        }
        else if (layer_type == "Pad")
        {
            layerParams.type = "Padding";
        }
        else if (layer_type == "Shape")
        {
            CV_Assert(node_proto.input_size() == 1);
            shapeIt = outShapes.find(node_proto.input(0));
            CV_Assert(shapeIt != outShapes.end());
            MatShape inpShape = shapeIt->second;

            Mat shapeMat(inpShape.size(), 1, CV_32S);
            for (int j = 0; j < inpShape.size(); ++j)
                shapeMat.at<int>(j) = inpShape[j];
            shapeMat.dims = 1;

            constBlobs.insert(std::make_pair(layerParams.name, shapeMat));
            continue;
        }
        else if (layer_type == "Gather")
        {
            CV_Assert(node_proto.input_size() == 2);
            CV_Assert(layerParams.has("axis"));
            Mat input = getBlob(node_proto, constBlobs, 0);
            Mat indexMat = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
            int index = indexMat.at<int>(0);
            int axis = layerParams.get<int>("axis");

            std::vector<cv::Range> ranges(input.dims, Range::all());
            ranges[axis] = Range(index, index + 1);

            Mat out = input(ranges);
            constBlobs.insert(std::make_pair(layerParams.name, out));
            continue;
        }
        else if (layer_type == "Concat")
        {
            bool hasVariableInps = false;
            for (int i = 0; i < node_proto.input_size(); ++i)
            {
                if (layer_id.find(node_proto.input(i)) != layer_id.end())
                {
                    hasVariableInps = true;
                    break;
                }
            }

            if (!hasVariableInps)
            {
                std::vector<Mat> inputs(node_proto.input_size()), concatenated;
                for (size_t i = 0; i < inputs.size(); ++i)
                {
                    inputs[i] = getBlob(node_proto, constBlobs, i);
                }
                Ptr<Layer> concat = ConcatLayer::create(layerParams);
                runLayer(concat, inputs, concatenated);

                CV_Assert(concatenated.size() == 1);
                constBlobs.insert(std::make_pair(layerParams.name, concatenated[0]));
                continue;
            }
        }
        else
        {
            for (int j = 0; j < node_proto.input_size(); j++) {
                if (layer_id.find(node_proto.input(j)) == layer_id.end())
                    layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
         }

         int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
         layer_id.insert(std::make_pair(layerParams.name, LayerInfo(id, 0)));


         std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
         for (int j = 0; j < node_proto.input_size(); j++) {
             layerId = layer_id.find(node_proto.input(j));
             if (layerId != layer_id.end()) {
                 dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, j);
                 // Collect input shapes.
                 shapeIt = outShapes.find(node_proto.input(j));
                 CV_Assert(shapeIt != outShapes.end());
                 layerInpShapes.push_back(shapeIt->second);
             }
         }

         // Compute shape of output blob for this layer.
         Ptr<Layer> layer = dstNet.getLayer(id);
         layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
         CV_Assert(!layerOutShapes.empty());
         outShapes[layerParams.name] = layerOutShapes[0];
     }
 }
Example #26
0
void cv::updateMotionHistory( InputArray _silhouette, InputOutputArray _mhi,
                              double timestamp, double duration )
{
    // Update a motion history image (MHI) in place:
    //   mhi(x,y) = timestamp   if silhouette(x,y) != 0
    //   mhi(x,y) = 0           if silhouette(x,y) == 0 and mhi(x,y) < timestamp - duration
    //   mhi(x,y) unchanged     otherwise
    //
    // _silhouette: CV_8UC1 motion mask (non-zero = motion at this pixel).
    // _mhi:        CV_32FC1 motion history image, same size, updated in place.
    // timestamp:   current time, in the same units as duration.
    // duration:    maximal duration of motion track kept in the MHI.
    CV_Assert( _silhouette.type() == CV_8UC1 && _mhi.type() == CV_32FC1 );
    CV_Assert( _silhouette.sameSize(_mhi) );

    float ts = (float)timestamp;
    float delbound = (float)(timestamp - duration);   // entries older than this are cleared

    // OpenCL fast path for UMat inputs (the macro returns on success).
    CV_OCL_RUN(_mhi.isUMat() && _mhi.dims() <= 2,
               ocl_updateMotionHistory(_silhouette, _mhi, ts, delbound))

    Mat silh = _silhouette.getMat(), mhi = _mhi.getMat();
    Size size = silh.size();
#ifdef HAVE_IPP
    int silhstep = (int)silh.step, mhistep = (int)mhi.step;
#endif

    // Continuous matrices can be treated as a single long row.
    if( silh.isContinuous() && mhi.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
#ifdef HAVE_IPP
        // Row steps in bytes for the flattened layout: the silhouette is
        // 1 byte/pixel, the MHI is sizeof(Ipp32f) bytes/pixel.
        silhstep = (int)silh.total();
        mhistep = (int)mhi.total() * sizeof(Ipp32f);
#endif
    }

#ifdef HAVE_IPP
    // IPP fast path; fall through to the generic code only on failure.
    IppStatus status = ippiUpdateMotionHistory_8u32f_C1IR((const Ipp8u *)silh.data, silhstep, (Ipp32f *)mhi.data, mhistep,
                                                          ippiSize(size.width, size.height), (Ipp32f)timestamp, (Ipp32f)duration);
    if (status >= 0)
        return;
#endif

#if CV_SSE2
    volatile bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2);
#endif

    for(int y = 0; y < size.height; y++ )
    {
        const uchar* silhData = silh.ptr<uchar>(y);
        float* mhiData = mhi.ptr<float>(y);
        int x = 0;

#if CV_SSE2
        if( useSIMD )
        {
            // Branch-free vectorized form of the scalar loop below,
            // processing 8 pixels per iteration.
            __m128 ts4 = _mm_set1_ps(ts), db4 = _mm_set1_ps(delbound);
            for( ; x <= size.width - 8; x += 8 )
            {
                __m128i z = _mm_setzero_si128();
                // Widen 8 silhouette bytes into two float vectors s0, s1.
                __m128i s = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(silhData + x)), z);
                __m128 s0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(s, z)), s1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(s, z));
                __m128 v0 = _mm_loadu_ps(mhiData + x), v1 = _mm_loadu_ps(mhiData + x + 4);
                __m128 fz = _mm_setzero_ps();

                // Clear entries that fell out of the history window:
                // v = (v >= delbound) ? v : 0.
                v0 = _mm_and_ps(v0, _mm_cmpge_ps(v0, db4));
                v1 = _mm_and_ps(v1, _mm_cmpge_ps(v1, db4));

                // XOR-select: m = (v ^ ts) where silhouette != 0, else 0 ...
                __m128 m0 = _mm_and_ps(_mm_xor_ps(v0, ts4), _mm_cmpneq_ps(s0, fz));
                __m128 m1 = _mm_and_ps(_mm_xor_ps(v1, ts4), _mm_cmpneq_ps(s1, fz));

                // ... so v ^= m yields ts where the silhouette is set,
                // and leaves v unchanged elsewhere.
                v0 = _mm_xor_ps(v0, m0);
                v1 = _mm_xor_ps(v1, m1);

                _mm_storeu_ps(mhiData + x, v0);
                _mm_storeu_ps(mhiData + x + 4, v1);
            }
        }
#endif

        // Scalar tail (and the full loop when SSE2 is unavailable).
        for( ; x < size.width; x++ )
        {
            float val = mhiData[x];
            val = silhData[x] ? ts : val < delbound ? 0 : val;
            mhiData[x] = val;
        }
    }
}
Example #27
0
    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response)
    {
        // Merge an exposure bracket into an HDR radiance map (Debevec-style):
        // per pixel, a weight-averaged sum of (log response - log exposure time),
        // exponentiated at the end.
        //
        // src:            vector of CV_8U images of identical size and channel count.
        // dst:            output CV_32F radiance map, same size as the inputs.
        // _times:         per-image exposure times, one entry per input image.
        // input_response: LDR_SIZE x 1 camera response curve; empty -> linear response.
        std::vector<Mat> images;
        src.getMatVector(images);
        Mat times = _times.getMat();

        CV_Assert(images.size() == times.total());
        checkImageDimensions(images);
        CV_Assert(images[0].depth() == CV_8U);

        int channels = images[0].channels();
        Size size = images[0].size();
        int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

        dst.create(images[0].size(), CV_32FCC);
        Mat result = dst.getMat();

        Mat response = input_response.getMat();

        if(response.empty()) {
            response = linearResponse(channels);
            // Copy row 1 into row 0 so log() below never sees a zero.
            // NOTE(review): Vec3f access assumes a 3-channel response here --
            // confirm behavior for other channel counts.
            response.at<Vec3f>(0) = response.at<Vec3f>(1);
        }

        Mat log_response;
        log(response, log_response);
        CV_Assert(log_response.rows == LDR_SIZE && log_response.cols == 1 &&
                  log_response.channels() == channels);

        // BUGFIX: clone before the in-place log. Mat exp_values(times) is a
        // shallow copy sharing the caller's buffer, so log(exp_values, exp_values)
        // used to silently overwrite the input exposure times.
        // NOTE(review): at<float> below assumes times holds CV_32F values -- confirm.
        Mat exp_values = times.clone();
        log(exp_values, exp_values);

        result = Mat::zeros(size, CV_32FCC);
        std::vector<Mat> result_split;
        split(result, result_split);
        Mat weight_sum = Mat::zeros(size, CV_32F);

        for(size_t i = 0; i < images.size(); i++) {
            std::vector<Mat> splitted;
            split(images[i], splitted);

            // Per-pixel weight: mean over channels of the LUT'ed intensities.
            Mat w = Mat::zeros(size, CV_32F);
            for(int c = 0; c < channels; c++) {
                LUT(splitted[c], weights, splitted[c]);
                w += splitted[c];
            }
            w /= channels;

            // Log-radiance contribution of this exposure:
            // weight * (log response(pixel) - log exposure time).
            Mat response_img;
            LUT(images[i], log_response, response_img);
            split(response_img, splitted);
            for(int c = 0; c < channels; c++) {
                result_split[c] += w.mul(splitted[c] - exp_values.at<float>((int)i));
            }
            weight_sum += w;
        }
        // Normalize by the accumulated weights and leave log space.
        weight_sum = 1.0f / weight_sum;
        for(int c = 0; c < channels; c++) {
            result_split[c] = result_split[c].mul(weight_sum);
        }
        merge(result_split, result);
        exp(result, result);
    }
Example #28
0
int clientSender()
{
    int sockfd, portno, n;
    struct sockaddr_in serv_addr;
    struct hostent *server;

    //char buffer[256];
    /*if (argc < 2) {
       fprintf(stderr,"usage %s hostname port\n", argv[0]);
       exit(0);
    }*/
    portno = 50000;
    //portno = atoi(argv[2]);
    //printf("%d\n", portno);
    //portno = 10000;    
    /*sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (sockfd < 0) 
        error("ERROR opening socket");
    server = gethostbyname("localhost");
    if (server == NULL) {
        fprintf(stderr,"ERROR, no such host\n");
        exit(0);
    }
    bzero((char *) &serv_addr, sizeof(serv_addr));
    serv_addr.sin_family = AF_INET;
    bcopy((char *)server->h_addr, 
         (char *)&serv_addr.sin_addr.s_addr,
         server->h_length);
    serv_addr.sin_port = htons(portno);*/



    //namedWindow( "Client", CV_WINDOW_AUTOSIZE );// Create a window for display.
    
    Mat image;
    int tag = 2;
    char jpg_dumpname[]="laps00000000.jpg";
    while(tag<=8)
    {
    	sockfd = socket(AF_INET, SOCK_STREAM, 0);
	    if (sockfd < 0) 
	        error("ERROR opening socket");
	    server = gethostbyname("localhost");
	    if (server == NULL) {
	        fprintf(stderr,"ERROR, no such host\n");
	        exit(0);
	    }
	    bzero((char *) &serv_addr, sizeof(serv_addr));
	    serv_addr.sin_family = AF_INET;
	    bcopy((char *)server->h_addr, 
	         (char *)&serv_addr.sin_addr.s_addr,
	         server->h_length);
	    serv_addr.sin_port = htons(portno);
    	//printf("start\n");
    	if (connect(sockfd,(struct sockaddr *) &serv_addr,sizeof(serv_addr)) < 0) 
    	{
    	 	//printf("rey mama\n");
        	error("ERROR connecting");
    	}
    	snprintf(&jpg_dumpname[4], 9, "%08d", tag);
        strncat(&jpg_dumpname[12], ".jpg", 5);
        tag++;
        //printf("%d\n", tag);
	    image = imread(jpg_dumpname, CV_LOAD_IMAGE_COLOR);
	    if(! image.data )                              // Check for invalid input
	    {
	        cout <<  "Could not open or find the image" << std::endl ;
	        return -1;
	    }
	    
	    //imshow( "Client", image ); 
		
		image = (image.reshape(0,1)); // to make it continuous
		int  imgSize = image.total()*image.elemSize();
		n = send(sockfd, image.data, imgSize, 0);
	    if (n < 0) 
	         error("ERROR writing to socket");
	    close(sockfd);
	    //sockfd = -1;
	    //printf("end\n");
	}

    return 0;
}
Example #29
0
bool pixkit::qualityassessment::RAPSD(const Mat Spectrum1f, Mat &RAPSD1f, Mat &Anisotropy1f){

	//////////////////////////////////////////////////////////////////////////
	///// Radially Averaged Power Spectrum Density (RAPSD) and anisotropy.
	///// Spectrum1f:   square CV_32FC1 power spectrum centered at the image middle.
	///// RAPSD1f:      output 1 x Number_APS row of per-radius mean energy,
	/////               normalized by the mean of the whole spectrum.
	///// Anisotropy1f: output 1 x Number_APS row of per-radius anisotropy in dB.
	// Only square inputs are supported.
	if(Spectrum1f.rows!=Spectrum1f.cols){
		CV_Assert(false);
	}

	//////////////////////////////////////////////////////////////////////////
	///// get mean (used to normalize the RAPSD at the end)
	float mean = (float)sum(Spectrum1f)[0] / (float)Spectrum1f.total();

	// Visited-pixel map: marks spectrum samples already counted for the current
	// ring, so each pixel contributes at most once per radius.
	Mat map;
	map.create(cvSize(Spectrum1f.rows,Spectrum1f.cols),CV_8UC1);	// square input, so the (rows, cols) argument order to cvSize is harmless
	int dr = 1;	// radial bin width in pixels
	double pi = 3.14159265;
	// for dr == 1, dd means the maximum radius of the image, only for (width == height), still need to be modified
	int dd = (int)sqrt( (double)2 * ( (Spectrum1f.rows/2) * (Spectrum1f.cols/2) ) );	
	int Number_APS = (int)((dd)/dr);
	double HalfImgWidth = Spectrum1f.rows / 2.0;

	#pragma region RAPSD	
	Mat APS_m;
	APS_m.create(1,Number_APS,CV_32FC1);
	APS_m.setTo(0);

	// For each radius, walk the circle in 0.1-degree steps and average the
	// energy of the distinct samples it crosses.
	for (int p=0; p<Number_APS; p++)
	{
		map.setTo(0);
		double TotalE = 0;
		int Counter = 0;
		double r = p * dr;

		for (double theta=0; theta<360; theta+=0.1)
		{
			double epi = (double)theta * pi / 180.0;
			int x = (int) floor( (r * cos(epi)) +0.5 );
			int y = (int) floor( (r * sin(epi)) +0.5 );

			// Skip samples that fall outside the spectrum.
			if ((int)HalfImgWidth+x < 0 || (int)HalfImgWidth+x >= Spectrum1f.rows)
				continue;

			if ((int)HalfImgWidth+y < 0 || (int)HalfImgWidth+y >= Spectrum1f.rows)
				continue;

			// Skip samples already counted for this radius.
			if (map.data[((int)HalfImgWidth+x) * map.cols + ((int)HalfImgWidth+y)] == 255)
				continue;

			// BUGFIX: row index cast made explicit; the original passed the
			// double (HalfImgWidth+x) to ptr<float>(int), relying on an implicit
			// truncating conversion, inconsistent with the anisotropy loop below.
			TotalE = TotalE + Spectrum1f.ptr<float>((int)HalfImgWidth+x)[(int)HalfImgWidth+y] ; 

			Counter = Counter + 1;

			// map has the same size as the spectrum, so map.rows/2.0 == HalfImgWidth.
			map.data[((int)(map.rows/2.0)+x) * map.cols + ((int)(map.cols/2.0)+y)] = 255;
		}

		if (Counter != 0 && TotalE != 0)
			APS_m.ptr<float>(0)[p]=  (float) (TotalE / Counter);
	}
	#pragma endregion

	#pragma region Anisotropy
	// Anisotropy: per-radius variance of the energy around the ring relative to
	// the ring mean computed above, expressed in dB.
	Mat AN_m ;
	AN_m.create(1,Number_APS,CV_32FC1);
	AN_m.setTo(0);
	for (int p=0; p<Number_APS; p++)
	{
		map.setTo(0);
		double TotalE = 0;
		int Counter = 0;
		double r = p * dr;

		for (double theta=0; theta<360; theta+=0.1)
		{
			double epi = (double)theta * pi / 180.0;
			int x = (int) floor( (r * cos(epi)) +0.5 );
			int y = (int) floor( (r * sin(epi)) +0.5 );

			if ((int)HalfImgWidth+x < 0 || (int)HalfImgWidth+x >= Spectrum1f.rows)
				continue;

			if ((int)HalfImgWidth+y < 0 || (int)HalfImgWidth+y >= Spectrum1f.rows)
				continue;

			if (map.data[((int)HalfImgWidth+x) * map.cols + ((int)HalfImgWidth+y)] == 255)
				continue;

			// Accumulate the squared deviation from the ring mean.
			TotalE = TotalE + 
				(APS_m.ptr<float>(0)[p] - Spectrum1f.ptr<float>((int)HalfImgWidth+x)[(int)HalfImgWidth+y]) * 
				(APS_m.ptr<float>(0)[p] - Spectrum1f.ptr<float>((int)HalfImgWidth+x)[(int)HalfImgWidth+y]);

			Counter = Counter + 1;
			map.data[((int)(map.rows/2.0)+x) * map.cols + ((int)(map.cols/2.0)+y)] = 255;
		}

		// NOTE(review): Counter == 1 would divide by zero below (Counter-1); the
		// guard only excludes Counter == 0 -- confirm rings always hit >= 2 samples.
		if (Counter != 0 && TotalE != 0)
			AN_m.ptr<float>(0)[p] = 10*log10((TotalE / (Counter-1)) / (APS_m.ptr<float>(0)[p]*APS_m.ptr<float>(0)[p]));
	}
	#pragma endregion

	RAPSD1f = APS_m	/ mean;	// fixed stray double semicolon
	Anisotropy1f = AN_m;

	return true;
}
Example #30
0
void RPPG::trackFace(Mat &frameGray) {

    // Replenish the feature set when too few corners survived the last frame.
    if (corners.size() < MIN_CORNERS) {
        detectCorners(frameGray);
    }

    Contour2f forwardPts;
    Contour2f backwardPts;
    vector<uchar> statusFwd;
    vector<uchar> statusBwd;
    Mat trackError;

    // Forward KLT pass: previous frame -> current frame.
    calcOpticalFlowPyrLK(lastFrameGray, frameGray, corners, forwardPts, statusFwd, trackError);

    // Backward pass for a forward-backward consistency check.
    calcOpticalFlowPyrLK(frameGray, lastFrameGray, forwardPts, backwardPts, statusBwd, trackError);

    // Keep only corners tracked both ways that land back near their origin.
    Contour2f keptNew;
    Contour2f keptOld;
    for (size_t idx = 0; idx < corners.size(); idx++) {
        bool consistent = statusFwd[idx] && statusBwd[idx]
                          && cv::norm(corners[idx] - backwardPts[idx]) < 2;
        if (consistent) {
            keptOld.push_back(backwardPts[idx]);
            keptNew.push_back(forwardPts[idx]);
        } else {
            LOGD("Mis!");
        }
    }

    if (keptNew.size() < MIN_CORNERS) {
        LOGD("Tracking failed! Not enough corners left.");
        invalidateFace();
        return;
    }

    // Save updated features
    corners = keptNew;

    // Estimate the affine motion between the surviving corner sets.
    Mat motion = estimateRigidTransform(keptOld, keptNew, false);

    if (motion.total() > 0) {

        // Warp the face box corners by the estimated transform.
        Contour2f boxPts;
        boxPts.push_back(box.tl());
        boxPts.push_back(box.br());
        Contour2f boxPtsWarped;
        cv::transform(boxPts, boxPtsWarped, motion);
        box = Rect(boxPtsWarped[0], boxPtsWarped[1]);

        // Warp the region of interest the same way.
        Contour2f roiPts;
        roiPts.push_back(roi.tl());
        roiPts.push_back(roi.br());
        Contour2f roiPtsWarped;
        cv::transform(roiPts, roiPtsWarped, motion);
        roi = Rect(roiPtsWarped[0], roiPtsWarped[1]);

        updateMask(frameGray);
    }
}