Example #1
0
// Decode the buffered WebP stream (`data`) into `img`.
// Returns true only when libwebp reports success by returning the very
// destination pointer it was given; false on bad header values, an
// unsupported channel count, or a decode failure.
bool WebPDecoder::readData(Mat &img)
{
    // m_width/m_height/m_type were parsed from the WebP header earlier.
    if( m_width > 0 && m_height > 0 )
    {
        // (Re)allocate the destination only when its geometry or type
        // differs from what the header demands.
        if (img.cols != m_width || img.rows != m_height || img.type() != m_type)
        {
            img.create(m_height, m_width, m_type);
        }

        uchar* out_data = img.data;
        // NOTE(review): assumes img is continuous (cols*rows*elemSize);
        // true right after create(), but worth confirming for reused mats.
        size_t out_data_size = img.cols * img.rows * img.elemSize();

        uchar *res_ptr = 0;
        if (channels == 3)
        {
            // Decode straight into img's buffer in BGR order.
            res_ptr = WebPDecodeBGRInto(data.data, data.total(), out_data,
                                        out_data_size, img.step);
        }
        else if (channels == 4)
        {
            // Same, but with an alpha channel (BGRA).
            res_ptr = WebPDecodeBGRAInto(data.data, data.total(), out_data,
                                         out_data_size, img.step);
        }

        // libwebp returns the output pointer on success, NULL on failure.
        if(res_ptr == out_data)
        {
            return true;
        }
    }

    return false;
}
Example #2
0
void mouseHandler(int event, int x, int y, int flags, void* param)
{
    if (event == CV_EVENT_LBUTTONDOWN&& !drag && !select_flag)
    {

        point1 = cv::Point(x, y);
        drag = 1;
    }

    if (event == CV_EVENT_MOUSEMOVE && drag && !select_flag)
    {
        cv::Mat img1 = first.clone();
        point2 = cv::Point(x, y);
        cv::rectangle(img1, point1, point2, CV_RGB(255, 0, 0), 3, 8, 0);
        cv::imshow(window_name, img1);
    }

    if (event == CV_EVENT_LBUTTONUP && drag && !select_flag)
    {
        cv::Mat img2 = first.clone();
        point2 = cv::Point(x, y);
        drag = 0;
        select_flag = 1;
        cv::imshow(window_name, img2);
        callback = true;
    }
	
}
Example #3
0
// Initialize the TLD tracker on its first frame.
// Converts `image` to grayscale, optionally upscales it (and the bounding
// box) when the Data heuristic asks for scale > 1, then constructs the
// TLD model. `boundingBox` is given in original-image coordinates.
// Always returns true.
bool TrackerTLDImpl::initImpl(const Mat& image, const Rect2d& boundingBox)
{
    Mat image_gray;
    trackerProxy->init(image, boundingBox);
    if(image.channels() > 1)
    {
        cvtColor( image, image_gray, COLOR_BGR2GRAY );
    }
    else
    {
        // Already single-channel; clone so we own the pixels.
        image_gray = image.clone();
    }

    // Per-run bookkeeping (scale factor, confidence flags) lives in `data`.
    data = Ptr<Data>(new Data(boundingBox));
    double scale = data->getScale();
    Rect2d myBoundingBox = boundingBox;
    if( scale > 1.0 )
    {
        // Work on an upscaled copy so small targets have enough pixels;
        // the bounding box is scaled to match the resized image.
        Mat image_proxy;
        resize(image_gray, image_proxy, Size(cvRound(image.cols * scale), cvRound(image.rows * scale)), 0, 0, DOWNSCALE_MODE);
        image_proxy.copyTo(image_gray);
        myBoundingBox.x *= scale;
        myBoundingBox.y *= scale;
        myBoundingBox.width *= scale;
        myBoundingBox.height *= scale;
    }
    model = Ptr<TrackerTLDModel>(new TrackerTLDModel(params, image_gray, myBoundingBox, data->getMinSize()));

    // Start pessimistic: not confident, no failure recorded yet.
    data->confident = false;
    data->failedLastTime = false;

    return true;
}
Example #4
0
/**
 * Main entry called from Matlab: decodes an encoded image buffer
 * (uint8 vector) via cv::imdecode and returns it as a Matlab array.
 * Options are key/value pairs; only "Flags" (imdecode flags, default 1
 * = color) is recognized.
 * @param nlhs number of left-hand-side arguments
 * @param plhs pointers to mxArrays in the left-hand-side
 * @param nrhs number of right-hand-side arguments
 * @param prhs pointers to mxArrays in the right-hand-side
 */
void mexFunction( int nlhs, mxArray *plhs[],
                  int nrhs, const mxArray *prhs[] )
{
    // Check the number of arguments: one buffer plus an even number of
    // key/value options, and at most one output.
    if (nrhs<1 || (nrhs%2)!=1 || nlhs>1)
        mexErrMsgIdAndTxt("mexopencv:error","Wrong number of arguments");

    // Argument vector
    vector<MxArray> rhs(prhs,prhs+nrhs);

    // Option processing
    int flags = 1;
    for (int i=1; i<nrhs; i+=2) {
        string key = rhs[i].toString();
        if (key == "Flags")
            flags = rhs[i+1].toInt();
        else
            mexErrMsgIdAndTxt("mexopencv:error","Unrecognized option");
    }

    // Process: decode the raw byte buffer into an image.
    Mat buf(rhs[0].toMat(CV_8U));
    Mat m = imdecode(buf, flags);
    if (m.data == NULL)
        mexErrMsgIdAndTxt("mexopencv:error","imdecode failed");
    // OpenCV's default is BGR while Matlab's is RGB
    if (m.type() == CV_8UC3)
        cvtColor(m, m, cv::COLOR_BGR2RGB);
    plhs[0] = MxArray(m);
}
Example #5
0
// Run the global HOG people detector on `frame`, drop detections fully
// contained in another detection, and draw the survivors on the frame
// (Mat shares its buffer, so the caller sees the boxes).
// `isFlip` horizontally mirrors the frame before detection.
void detectPeople(Mat frame, bool isFlip) {
    vector<Rect> found, found_filtered;

    // we shouldn't need to flip anything - if we always use landscape mode
    if (isFlip) {
        Mat flippedFrame;
        flip(frame, flippedFrame, 1);
        flippedFrame.copyTo(frame);
    }

    hog.detectMultiScale(frame, found, 0, Size(8,8), Size(32,32), 1.05, 2);

    // BUG FIX: found.size() is size_t; passing it to a %d conversion is
    // undefined behavior on LP64 targets. Cast explicitly.
    LOGD("found %d", (int)found.size());

    // Keep only rectangles that are not entirely inside another detection.
    for (size_t i = 0; i < found.size(); ++i) {
        Rect r = found[i];
        size_t j = 0;
        for (; j < found.size(); ++j) {
            // r & found[j] is the intersection; equality with r means r
            // lies completely inside found[j].
            if (j != i && (r & found[j]) == r) {
                break;
            }
        }
        if (j == found.size()) {
            found_filtered.push_back(r);
        }
    }

    for (size_t i = 0; i < found_filtered.size(); ++i) {
        Rect r = found_filtered[i];
        rectangle(frame, r.tl(), r.br(), Scalar(255,0,0), 3);
    }
}
/**
 * @function main
 * Load the face/eye cascades, open the default camera, and run
 * detectAndDisplay() on each frame until ESC is pressed or the
 * stream ends. Returns -1 on any setup failure, 0 otherwise.
 */
int main( void )
{
    VideoCapture capture;
    Mat frame;

    //-- 1. Load the cascade
    if( !face_cascade.load( face_cascade_name.c_str() ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
    if( !eyes_cascade.load( eyes_cascade_name.c_str() ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };

    //-- 2. Read the video stream (device 0 = default camera)
    capture.open( 0 );
    if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }

    while ( capture.read(frame) )
    {
        if( frame.empty() )
        {
            printf(" --(!) No captured frame -- Break!");
            break;
        }

        //-- 3. Apply the classifier to the frame
        detectAndDisplay(frame);

        //-- bail out if escape was pressed (waitKey also pumps the GUI)
        int c = waitKey(10);
        if( (char)c == 27 ) { break; }
    }
    return 0;
}
Example #7
0
// Invert an edge map element-wise: out = 1 - in.
// Swaps foreground and background for images normalized so that "on"
// pixels have value 1 (the ones-matrix is built with the input's own type).
Mat foregroundBackgroundImageChange(Mat edgeDetectedImage)
{
    // FIX: the previous version pre-allocated a destination Mat that the
    // expression assignment immediately replaced -- a wasted full-frame
    // allocation. Mat expressions allocate their result themselves.
    Mat ones_mat = Mat::ones(edgeDetectedImage.rows, edgeDetectedImage.cols, edgeDetectedImage.type());
    return ones_mat - edgeDetectedImage;
}
Example #8
0
// Return a 3x3 camera matrix with the requested element type: a converted
// copy of cameraMatrix0 when it is already 3x3, the identity otherwise.
static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)
{
    Mat prepared = Mat::eye(3, 3, rtype);
    const bool hasValidShape = (cameraMatrix0.size() == prepared.size());
    if (hasValidShape)
        cameraMatrix0.convertTo(prepared, rtype);
    return prepared;
}
Example #9
0
// Rectify the keyboard region of `f` into `g` using the calibration points
// in keyboard.point_box: p[0]->p[1] give the orientation, p[2] the top-left
// corner, p[3] the output size (width, height).
// Returns false when the calibration is degenerate (coincident p0/p1 or a
// non-positive output size), true otherwise.
bool keyboard_box(Mat& f, Mat& g, Keyboard& keyboard)
{
	Point2f* p = keyboard.point_box;
	Point2f p0 = p[0];
	Point2f p1 = p[1];
	Point2f lt = p[2];
	Point2f wh = p[3];
	
	float dx = p1.x - p0.x;
	float dy = p1.y - p0.y;
	float l = sqrtf(dx*dx + dy*dy);
	// BUG FIX: coincident p0/p1 made l == 0, so c and s below became NaN and
	// silently corrupted the remap; a non-positive size broke g.create too.
	if (l <= 0.0f || wh.x <= 0.0f || wh.y <= 0.0f)
		return false;
	float c = dx/l;	// cosine of the keyboard rotation
	float s = dy/l;	// sine of the keyboard rotation
	// Row-major 3x3 rigid transform: rotation by (c,s), translation to lt.
	float a[9];
	a[0] = c;
	a[1] = s;
	a[2] = lt.x;
	a[3] = -a[1];
	a[4] =  a[0];
	a[5] = lt.y;
	a[6] = 0;
	a[7] = 0;
	a[8] = 1;
	
	g.create(Size(wh.x, wh.y), f.type());
	EyeP_kwmap(EyeP_g, a, f.cols, f.rows, 0, f.cols, 0, f.rows, g.cols, g.rows, 0, g.cols, 0, g.rows, map_k, map_w);
	EyeP_remap_w3(f.ptr(), f.cols, f.rows, g.ptr() , g.cols, g.rows, map_k, map_w);
	
	return true;
}
Example #10
0
// Build a linear-chain pairwise graph over nNodes nodes with nStates states
// each, run chain inference, and print the per-node state beliefs.
// The first node keeps its observed potential; all others start uniform.
void CChain::Main(void)
{
	size_t i;

	CGraphPairwise	graph(nStates);
	CInferChain		inferer(graph);

	Mat nodePot = getNodePot();
	graph.addNode(nodePot);				// add the first node (observed potential)
	nodePot.setTo(1.0f / nStates);			// uniform distribution for the rest
	for (i = 1; i < nNodes; i++)
		graph.addNode(nodePot);			// add nodes

	// Connect consecutive nodes with the same pairwise potential.
	Mat edgePot = getEdgePot();
	for (i = 0; i < nNodes - 1; i++)
		graph.addArc(i, i + 1, edgePot);	// add arcs

	// Inference
	inferer.infer();

	// Print Out Results: one row per node, one column per state.
	printf("Node\t"); for (byte s = 0; s < nStates; s++) printf("State %d\t", s); printf("\n");
	printf("---------------------------------------------------------------\n");
	for (i = 0; i < nNodes; i++) {
		printf("%zd \t", i);
		graph.getNode(i, nodePot);		// fetch the post-inference belief
		printf("%.4f", nodePot.at<float>(0, 0));  for (byte s = 1; s < nStates; s++) printf("\t%.4f", nodePot.at<float>(s, 0)); printf("\n");
	}
}
Example #11
0
// FLANN-match two descriptor sets and keep only "good" matches: those whose
// distance is at most max(4 * min_dist, _match_min_dist).
// Returns an empty vector when either descriptor set is empty.
vector<DMatch> GraphicEnd::match( Mat desp1, Mat desp2 )
{
    cout<<"GraphicEnd::match two desp"<<endl;
    FlannBasedMatcher matcher;
    vector<DMatch> matches;

    if (desp1.empty() || desp2.empty())
    {
        return matches;
    }
    matcher.match( desp1, desp2, matches);

    // BUG FIX: the distance scan was bounded by desp1.rows, but the matcher
    // may return fewer matches than query rows (e.g. with masks), which made
    // matches[i] an out-of-bounds read. Index the vector by its own size.
    double max_dist = 0, min_dist = 100;
    for (size_t i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist)
            min_dist = dist;
        if (dist > max_dist)
            max_dist = dist;
    }

    vector<DMatch> good_matches;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if (matches[i].distance <= max(4*min_dist, _match_min_dist))
        {
            good_matches.push_back(matches[i]);
        }
    }
    return good_matches;
}
Example #12
0
// Build a color feature vector from an array of 6 feature maps: each map is
// divided into a 4x4 grid and the mean of every cell is appended to the
// ExtractedFeatures member (6 * 16 = 96 values total).
// NOTE(review): assumes ExtractedFeatures has room for
// NumImgDivisions*NumImgDivisions*numElem entries -- confirm its declaration.
ColorFeature::ColorFeature(Mat *FMap){
	NumImgDivisions = 4;
	numElem = 6;
	//double ExtractedFeatures[NumImgDivisions*NumImgDivisions * numElem]; // definate array needs no delete
	Size s = FMap[0].size();
	Mat Image;
	// "Row" step is derived from width and "Column" step from height; the
	// naming is swapped relative to OpenCV conventions but used consistently
	// below (first Range = rows uses stepSizeColumn).
	int stepSizeRow = s.width / NumImgDivisions;
	int stepSizeColumn = s.height / NumImgDivisions;
	int counter = 0;
	// this loop will take the mean of the 16 subsections of the image and place each into an array
	for (int k = 0; k < numElem; k++){
		Image = FMap[k];
		// Recompute the grid for each map in case sizes differ.
		s = Image.size();
		stepSizeRow = s.width / NumImgDivisions;
		stepSizeColumn = s.height / NumImgDivisions;
		for (int i = 0; i < NumImgDivisions; i++){
			for (int j = 0; j < NumImgDivisions; j++){
				// check for boundary conditions on the high end, 
				int tc = stepSizeColumn*(i + 1);
				int tr = stepSizeRow*(j + 1);
				if (tc >s.height){
					tc = s.height;
				}
				if (tr >s.width){
					tr = s.width;
				}
				// Cell view: rows [stepSizeColumn*i, tc), cols [stepSizeRow*j, tr).
				Mat E = FMap[k](Range(stepSizeColumn*i, tc), Range(stepSizeRow*j, tr));
				ExtractedFeatures[counter] = mean(E)[0]; // values are returned in a vector the same length as the number of layers
				counter++;
				E.release();
			}

		}
	}
}
Example #13
0
// Regression test: run the uint8 TensorFlow face-detection graph on one
// image and compare the first six detections (confidence + box coordinates)
// against reference values captured from the Caffe version of the model.
TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8)
{
    std::string proto = findDataFile("dnn/opencv_face_detector.pbtxt", false);
    std::string model = findDataFile("dnn/opencv_face_detector_uint8.pb", false);

    Net net = readNetFromTensorflow(model, proto);
    Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
    // Mean-subtraction values match the model's training preprocessing.
    Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);

    net.setPreferableTarget(GetParam());

    net.setInput(blob);
    // Output has shape 1x1xNx7 where N - number of detections.
    // An every detection is a vector of values [id, classId, confidence, left, top, right, bottom]
    Mat out = net.forward();

    // References are from test for Caffe model.
    Mat ref = (Mat_<float>(6, 5) << 0.99520785, 0.80997437, 0.16379407, 0.87996572, 0.26685631,
                                    0.9934696, 0.2831718, 0.50738752, 0.345781, 0.5985168,
                                    0.99096733, 0.13629119, 0.24892329, 0.19756334, 0.3310290,
                                    0.98977017, 0.23901358, 0.09084064, 0.29902688, 0.1769477,
                                    0.97203469, 0.67965847, 0.06876482, 0.73999709, 0.1513494,
                                    0.95097077, 0.51901293, 0.45863652, 0.5777427, 0.5347801);
    // Compare columns 2..6 (confidence + box) of the top-6 detections only.
    normAssert(out.reshape(1, out.total() / 7).rowRange(0, 6).colRange(2, 7), ref, "", 2.8e-4, 3.4e-3);
}
// Read the image paths listed (one per line) in `listFile`, load each image
// from `dir` + line, and append valid images to img_lst. Stops at the first
// empty line or EOF; exits the process if the list file cannot be opened.
void HOGTrainer::loadImages(const string &dir, const string &listFile, vector<Mat> &img_lst) {
    string line;
    ifstream file;

    file.open((listFile).c_str());
    if (!file.is_open()) {
        cerr << "Unable to open the list of images from " << listFile << " listFile." << endl;
        exit(-1);
    }

    // BUG FIX: the path was printed before getline() filled `line`, so each
    // iteration logged the previous (first: empty) entry. Read first, then log.
    while (getline(file, line)) {
        if (line.empty()) // no more file to read
            break;
        cout << (dir + line).c_str() << endl;
        Mat img = imread((dir + line).c_str()); // load the image
        if (img.empty()) // invalid image, just skip it.
            continue;
#ifdef _DEBUG
        imshow( "image", img );
        waitKey( 10 );
#endif
        img_lst.push_back(img.clone());
    }
}
// Smooth the 1-D histogram _histMat with either a box kernel (type==1,
// normalized by the kernel sum afterwards) or a Gaussian kernel (type==2).
// Raises a cvError for multi-channel histograms.
Mat Histogram::applyKernel(int size,int type)
{

    if(_histMat.channels()>1)
        cvError(1,__FUNCTION__,"Only for 1D histograms",__FILE__,__LINE__);

    // FIX: removed the `input` Mat that was allocated and filled but never
    // used anywhere in this function.
    Mat output;
    Mat kernel=cv::Mat(size,1,CV_32FC1);

    if(type==1)
    {
        kernel.setTo(cv::Scalar::all(1));  // box filter
    }
    else if(type==2)
    {
        // NOTE(review): ksize is hard-coded to 256 while `size` is passed in
        // the sigma position -- confirm this parameter order is intended.
        kernel=getGaussianKernel(256,size,CV_32FC1);
    }
    // NOTE(review): for any other `type` the kernel is used uninitialized.

    cv::filter2D(_histMat,output,_histMat.depth(),kernel,Point(-1,-1),0,BORDER_CONSTANT);
    if(type==1)
    {
        // Normalize the box response so total histogram mass is preserved
        // (sum only needed on this path, so compute it here).
        Scalar sum=cv::sum(kernel);
        output.convertTo(output,output.type(),1.0/sum[0],0);
    }

    return output;
}
Example #16
0
// Produce a single-channel version of `src` in `dst`: 8-bit BGR inputs are
// converted to grayscale, anything else is deep-copied unchanged.
void opencv::Convert2GRAY(const Mat &src, Mat& dst)
{
	const bool isBgr8 = (src.type() == CV_8UC3);
	if (!isBgr8)
	{
		dst = src.clone();
		return;
	}
	cv::cvtColor(src, dst, CV_BGR2GRAY);
}
Example #17
0
// Decide whether parking spot `indexVaga` is occupied by edge-counting the
// ROI `retangulo` in the current frame: occupied when the Canny edge image
// of `atual` has more than 300 white pixels. `anterior` is only logged.
bool Comparador::DeterminaOcupacao(Rect retangulo, int indexVaga, Mat anterior, Mat atual) {

	// FIX: the original zero-allocated a full-frame regiaoA, copied it to
	// regiaoB, then immediately replaced both headers with ROI views --
	// pure wasted work. ROI views are enough.
	Mat regiaoA = anterior(retangulo);
	Mat regiaoB = atual(retangulo);

	Mat cannyA, cannyB;
	Canny(regiaoA, cannyA, 75, 150);
	Canny(regiaoB, cannyB, 75, 150);

	// FIX: contaBrancos() was called twice per image (once for the log, once
	// for the result) and the `dif` variable was never used.
	int brancosAntes = contaBrancos(cannyA);
	int brancosDepois = contaBrancos(cannyB);

	cout << "Brancos antes na vaga " << indexVaga << ": " << brancosAntes << "   Brancos depois: " << brancosDepois << endl;

	return brancosDepois > 300;
}
Example #18
0
// Build a BITMAPINFO describing `src` for GDI display: 8-bit grayscale gets
// a 256-entry gray palette, 8-bit BGR gets no palette. Returns NULL for any
// other Mat type or on allocation failure; the caller owns the malloc'd block.
LPBITMAPINFO opencv::CreateMapInfo(const Mat& src)
{
	BITMAPINFOHEADER BIH = { 40, 1, 1, 1, 8, 0, 0, 0, 0, 0, 0 };
	LPBITMAPINFO lpBmi;
	int          wid, hei, bits, colors, i, depth, channels;
	RGBQUAD  ColorTab[256];
	// BUG FIX: the parameter is named `src`, but the original read
	// `mat.cols` / `mat.rows`, which does not compile (no `mat` in scope).
	wid = src.cols;     hei = src.rows;
	if (CV_8UC1 == src.type())
	{
		depth = 8; channels = 1;
	}
	else if (CV_8UC3 == src.type())
	{
		depth = 8; channels = 3;
	}
	else return NULL;
	bits = depth * channels;
	// More than 8 bits per pixel -> no palette; otherwise 2^bits entries.
	if (bits>8) colors = 0;
	else colors = 1 << bits;
	lpBmi = (LPBITMAPINFO)malloc(40 + 4 * colors);
	if (lpBmi == NULL) return NULL;            // allocation failure
	BIH.biWidth = wid;     BIH.biHeight = hei;
	BIH.biBitCount = (BYTE)bits;
	memcpy(lpBmi, &BIH, 40);                   //  copy the bitmap info header
	if (bits == 8) {                           //  256-color bitmap
		for (i = 0; i<256; i++)  {             //  build the grayscale palette
			ColorTab[i].rgbRed = ColorTab[i].rgbGreen = ColorTab[i].rgbBlue = (BYTE)i;
		}
		memcpy(lpBmi->bmiColors, ColorTab, 1024);
	}
	return(lpBmi);
}
Example #19
0
// Zero-pad inMatrix up to maxRows x maxCols, splitting the margin as evenly
// as possible; an odd remainder goes on the bottom/right edge (as before).
Mat padImageMatrix(Mat inMatrix, int maxRows, int maxCols){
    Mat paddedImage;

    int mR = maxRows - inMatrix.rows;
    int mC = maxCols - inMatrix.cols;
    // NOTE(review): assumes inMatrix is no larger than maxRows/maxCols;
    // a negative margin would make copyMakeBorder fail -- confirm callers.

    // FIX: the even/odd branching collapses to top = m/2, bottom = m - top
    // (integer division floors, so the extra pixel lands bottom/right
    // exactly as the original did).
    int top = mR / 2;
    int bottom = mR - top;
    int left = mC / 2;
    int right = mC - left;

    copyMakeBorder( inMatrix, paddedImage, top, bottom, left, right, BORDER_CONSTANT, Scalar(0,0,0) );

    // clone() detaches the result from any shared buffer before returning.
    return paddedImage.clone();
}
// Convert an OpenCV Mat (CV_8UC1 or CV_8UC3) to a QImage that OWNS its
// pixel data. Returns a null QImage for unsupported types.
QImage ViosGui::Mat2QImage(const Mat& mat)
{
    // 8-bits unsigned, NO. OF CHANNELS=1
    if(mat.type()==CV_8UC1)
    {
        // Set the color table (used to translate colour indexes to qRgb values)
        QVector<QRgb> colorTable;
        for (int i=0; i<256; i++)
            colorTable.push_back(qRgb(i,i,i));
        const uchar *qImageBuffer = (const uchar*)mat.data;
        // The QImage(uchar*, ...) constructor only WRAPS the buffer -- it
        // does not copy, so the returned image would dangle once `mat`
        // (or its underlying data) is released.
        QImage img(qImageBuffer, mat.cols, mat.rows, mat.step, QImage::Format_Indexed8);
        img.setColorTable(colorTable);
        // BUG FIX: deep-copy so the result owns its pixels (the 3-channel
        // path below already copies via rgbSwapped()).
        return img.copy();
    }
    // 8-bits unsigned, NO. OF CHANNELS=3
    if(mat.type()==CV_8UC3)
    {
        const uchar *qImageBuffer = (const uchar*)mat.data;
        QImage img(qImageBuffer, mat.cols, mat.rows, mat.step, QImage::Format_RGB888);
        // rgbSwapped() returns a BGR->RGB converted deep copy.
        return img.rgbSwapped();
    }
    else
    {
        qDebug() << "ERROR: Mat could not be converted to QImage.";
        return QImage();
    }
} // MatToQImage()
Example #21
0
int main(int argc, char *argv[])
{
    Mat img = imread("lena.jpg", CV_LOAD_IMAGE_COLOR);
    if(img.empty())
       return -1;
    Mat gimg;
    Mat norm = Mat::zeros(img.rows, img.cols, CV_32F);
    cvtColor(img, gimg, CV_RGB2GRAY);
    int max1 = -1;
    for (int i=0;i<gimg.rows;i++) {
        for (int j=0;j<gimg.cols;j++) {
            max1 = max(max1, (int)gimg.at<uchar>(i, j));
        }
    }
    for (int i=0;i<gimg.rows;i++) {
        for (int j=0;j<gimg.cols;j++) {
            norm.at<float>(i, j)= gimg.at<uchar>(i, j)*1.0/max1;
        }
    }
    Sift sift;
    vector<KeyPoint> kp;
    sift.findSiftInterestPoint(norm, kp);
    int sum = 0;
    for (int i=0;i<kp.size();i++) {
        if (kp[i].octave==0) {
            sum++;
            circle(gimg, Point(kp[i].pt.y,kp[i].pt.x),1, Scalar(255,255,255));
        }
    }
    cout<<"Sum: "<<sum<<endl;
    namedWindow("lena", CV_WINDOW_AUTOSIZE );
    imshow("lena", gimg);
    waitKey(0);
    return 0;
}
Example #22
0
  //! Run SVM judgement over a batch of plate candidates; plates classified
  //! as positive are appended to resultVec. Rejected plates get a second
  //! chance on their central region. Always returns 0.
  int PlateJudge::plateJudge(const std::vector<CPlate> &inVec,
    std::vector<CPlate> &resultVec) {
    int num = inVec.size();
    for (int j = 0; j < num; j++) {
      CPlate inPlate = inVec[j];
      Mat inMat = inPlate.getPlateMat();

      // Single-plate overload fills `response` (1 = plate).
      int response = -1;
      plateJudge(inMat, response);

      if (response == 1)
        resultVec.push_back(inPlate);
      else {
        int w = inMat.cols;
        int h = inMat.rows;

        // Re-judge once using only the central region (crop 5% horizontal
        // and 10% vertical margins, then resize back to the plate size).

        Mat tmpmat = inMat(Rect_<double>(w * 0.05, h * 0.1, w * 0.9, h * 0.8));
        Mat tmpDes = inMat.clone();
        resize(tmpmat, tmpDes, Size(inMat.size()));

        plateJudge(tmpDes, response);

        if (response == 1) resultVec.push_back(inPlate);
      }
    }
    return 0;
  }
// Run holistic word recognition (dictnet) on a grayscale input image given
// as argv[1]; prints the detected word and its confidence.
// Returns 0 on success, 1 when no confidence is reported.
int main(int argc, const char * argv[])
{
    if (argc != 2)
    {
        printHelp();
        exit(1);
    }

    Mat image = imread(argv[1], IMREAD_GRAYSCALE);

    cout << "Read image (" << argv[1] << "): " << image.size << ", channels: " << image.channels() << ", depth: " << image.depth() << endl;

    if (image.empty())
    {
        printHelp();
        exit(1);
    }

    Ptr<OCRHolisticWordRecognizer> wordSpotter = OCRHolisticWordRecognizer::create("dictnet_vgg_deploy.prototxt", "dictnet_vgg.caffemodel", "dictnet_vgg_labels.txt");

    std::string word;
    vector<float> confs;
    wordSpotter->run(image, word, 0, 0, &confs);

    // BUG FIX: confs[0] was read unconditionally; guard against an empty
    // confidence vector to avoid undefined behavior.
    if (confs.empty())
    {
        cout << "Detected word: '" << word << "' (no confidence reported)" << endl;
        return 1;
    }

    cout << "Detected word: '" << word << "', confidence: " << confs[0] << endl;
    return 0;
}
Example #24
0
/* Generates <sample> from multivariate normal distribution, where <mean> - is an
   average row vector, <cov> - symmetric covariation matrix.
   Each output row of _samples is one sample: x = z * U + mean, with
   z ~ N(0, I) and U the Cholesky factor of cov (cov = U'*U). */
void randMVNormal( InputArray _mean, InputArray _cov, int nsamples, OutputArray _samples )
{
    // check mean vector and covariance matrix
    Mat mean = _mean.getMat(), cov = _cov.getMat();
    int dim = (int)mean.total();  // dimensionality
    CV_Assert(mean.rows == 1 || mean.cols == 1);
    CV_Assert(cov.rows == dim && cov.cols == dim);
    mean = mean.reshape(1,1);     // ensure a row vector

    // generate n-samples of the same dimension, from ~N(0,1)
    _samples.create(nsamples, dim, CV_32F);
    Mat samples = _samples.getMat();
    randn(samples, Scalar::all(0), Scalar::all(1));

    // decompose covariance using Cholesky: cov = U'*U
    // (cov must be square, symmetric, and positive semi-definite matrix)
    Mat utmat;
    Cholesky(cov, utmat);

    // transform random numbers using specified mean and covariance
    for( int i = 0; i < nsamples; i++ )
    {
        // `sample` is a header sharing row i's buffer with `samples`;
        // assigning a MatExpr to a Mat of matching size/type writes through
        // that buffer, so the row IS updated in place (this is not a
        // header rebind as plain Mat-to-Mat assignment would be).
        Mat sample = samples.row(i);
        sample = sample * utmat + mean;
    }
}
Example #25
0
// Find every contour within the image and test it for eligibility as a
// square (duplicates removed later). Results accumulate in the global
// `Squares`; `IsFindSquaresBusy` guards re-entry from the packet receiver.
// In demo mode the work is done on one thread; otherwise it is sharded
// across NumberofThreads worker threads and merged at the end.
void FindSquares() { //Find every contour within the image and test it to see if it is an eligible square (duplicates removed later)
    IsFindSquaresBusy=true; //set runtime boolean to busy (for receivepackets function)
    Squares.clear(); //clear the object/array which holds the vertices for each square so it is ready for a new iteration
    SquaresTempforMultiThreading.clear(); SquaresTempforMultiThreading.resize(NumberofThreads); //same idea as above -> clear all objects
    Mat pyr, gray0(AbsoluteOriginalImage.size(), CV_8U), gray; //Initialize Image Matrixes for various functions throughout
    if (TogglePyramidUpDownBlur) { //If blur is requested, carry out blurring procedures
    //The blurring procedure: down-scale and upscale the image to filter out the noise (another alternative is erode and dilate)
    pyrDown(AbsoluteOriginalImage, pyr, Size(AbsoluteOriginalImage.cols/2, AbsoluteOriginalImage.rows/2)); //downscale image
        pyrUp(pyr, gray0, AbsoluteOriginalImage.size()); //upscale image to original scaling
        if (DemoMode) {imshow(GUIWindowName, gray0); waitKey(DemoBeginFinishSleep/2);} //Display this image if in Demo Mode
    }
    else gray0=AbsoluteOriginalImage; //If blur is disabled, pass on original image as gray0 (gray original).
    if(DemoMode) {
        FindSquaresThreader(0, gray0, gray); //If in Demo Mode, run program with only one thread (otherwise it causes resource issues
        Squares=SquaresTempforMultiThreading[0]; //if in demo mode, only one array is used so go ahead and make the final array equal to the 0th array.
    }
    else { //otherwise, when not in demo mode, multithread the findsquares algorithm
    static thread Threads[NumberofThreads]; //Initialize Threading Objects (static: reused across calls)
    for(int i=0; i<NumberofThreads; i++) {
        Threads[i]=thread(FindSquaresThreader, i, gray0, gray); //assign different starting indexes/points (variable i) to each thread
        }
    for(int i=0; i<NumberofThreads; i++) {
        Threads[i].join(); //wait until all threads finish execution before continuing
        }
    CombineThreadingData(); //Combine the squares each individual function found into one final squares object
    }
    IsFindSquaresBusy=false; //set runtime boolean to not busy (for receivepackets function)
}
Example #26
0
// Train a 2-hidden-layer MLP on train_data/train_labels, evaluate it on
// test_data/test_labels, accumulate results into the (pre-sized) confusion
// matrix, and print accuracy and the confusion matrix to stderr.
// Labels are integer class ids in [0, nclasses).
void train_test(int nclasses, const Mat &train_data, const Mat &train_labels, const Mat &test_data, const Mat &test_labels, Mat &confusion) {
    // setup the ann:
    int nfeatures = train_data.cols;
    Ptr<ml::ANN_MLP> ann = ml::ANN_MLP::create();
    Mat_<int> layers(4,1);
    layers(0) = nfeatures;     // input
    layers(1) = nclasses * 8;  // hidden
    layers(2) = nclasses * 4;  // hidden
    layers(3) = nclasses;      // output, 1 pin per class.
    ann->setLayerSizes(layers);
    ann->setActivationFunction(ml::ANN_MLP::SIGMOID_SYM,0,0);
    ann->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, 0.0001));
    ann->setTrainMethod(ml::ANN_MLP::BACKPROP, 0.0001);

    // ann requires "one-hot" encoding of class labels:
    Mat train_classes = Mat::zeros(train_data.rows, nclasses, CV_32FC1);
    for(int i=0; i<train_classes.rows; i++)
    {
        train_classes.at<float>(i, train_labels.at<int>(i)) = 1.f;
    }
    cerr << train_data.size() << " " << train_classes.size() << endl;

    ann->train(train_data, ml::ROW_SAMPLE, train_classes);

    // run tests on validation set: predict() yields the winning class,
    // truncated to int for indexing the confusion matrix.
    for(int i=0; i<test_data.rows; i++) {
        int pred  = ann->predict(test_data.row(i), noArray());
        int truth = test_labels.at<int>(i);
        confusion.at<int>(pred, truth) ++;
    }
    // Accuracy = trace(confusion) / total predictions.
    Mat correct = confusion.diag();
    float accuracy = sum(correct)[0] / sum(confusion)[0];
    cerr << "accuracy: " << accuracy << endl;
    cerr << "confusion:\n" << confusion << endl;
}
// (Used from main.) Compute a reflection value for a 3-channel input image:
// binarize at 1.2x the mean gray level, then return the mean of the binary
// image (0..255) -- more bright/reflective pixels give a larger value.
int CalculateReflectionValue(Mat& in)
{
    int reflectionValue;

    // Convert the image to grayscale.
    Mat grayImg;
    cvtColor(in, grayImg, CV_BGR2GRAY);

    // Compute the mean intensity.
    Scalar mean, sigma;
    meanStdDev(grayImg, mean, sigma);

    // Two alternative histogram operations (intentionally disabled).
    //equalizeHist( grayImg, grayImg );
    //normalize(grayImg, grayImg, 0, 255, NORM_MINMAX);

    // Binarize with a threshold of 1.2 * mean.
    int thresh = 1.2 * mean.val[0];
    int highValue = 255;
    int lowValue = 0;
    grayImg.setTo(lowValue, grayImg< thresh);
    grayImg.setTo(highValue, grayImg >= thresh);
    if(DEBUG)imshow("ReflectionDetect", grayImg);

    // Recompute the mean on the binarized image.
    meanStdDev(grayImg, mean, sigma);

    // The reflection value is that mean, truncated to int.
    reflectionValue = mean.val[0];
    //cout << "reflection value = " << reflectionValue << endl;
    return reflectionValue;
}
Example #28
0
// Color halftoning demo: split an 8-bit BGR image into channels, rotate each
// to its own screen angle, halftone it, rotate back, merge, and save.
int main(int argc, char** args) {
    
    // read input data
    if (argc < 2) {
        cout << "No filename given" << endl;
        return 0;
    }
    // Require an 8-bit, 3-channel image.
    Mat input = imread(args[1]);
    if (input.data == NULL ||
            input.channels() != 3 ||
            input.depth() != CV_8U) {
        cout << "Invalid input data" << endl;
        return 0;
    }
    
    // NOTE(review): split()/merge() returning by value do not match the
    // cv:: signatures (those take output parameters) -- presumably
    // project-local wrappers; confirm.
    vector<Mat> rgb = split(input);
    // Per-channel screen angles in radians (~15, 75 and 45 degrees).
    vector<double> angles = {0.261799, 1.309, 0.785398};
    vector<Mat> restored;

    // Rotate each channel to its screen angle, halftone it (second argument
    // presumably the dot size -- confirm), then rotate back to the original
    // geometry.
    for (int i = 0; i < 3; i++) {
        Mat rotated = rotate(rgb[i], angles[i]);
        halftone_cir(rotated, 4);
        restored.push_back(unrotate(rotated, -angles[i], rgb[i].rows, rgb[i].cols));
    }

    Mat output = merge(restored);

    // write out image
    imwrite("after.jpg", output);
}
Example #29
0
// Compute the upper Hessenberg form H of matrix X (delegates to
// auxlib::hess; `tao` receives auxiliary scalars that are discarded).
// Entries below the first subdiagonal are explicitly zeroed so only the
// Hessenberg part is returned. On failure H is reset and a runtime error
// is raised.
arma_warn_unused
inline
Mat<typename T1::elem_type>
hess
  (
  const Base<typename T1::elem_type,T1>& X,
  const typename arma_blas_type_only<typename T1::elem_type>::result* junk = 0
  )
  {
  arma_extra_debug_sigprint();
  arma_ignore(junk);
  
  typedef typename T1::elem_type eT;
  
  Mat<eT> H;
  Col<eT> tao;
  
  const bool status = auxlib::hess(H, X.get_ref(), tao);
  
  // Zero the strictly-lower part below the first subdiagonal, column by
  // column (only meaningful when there are at least 3 rows).
  if(H.n_rows > 2)
    {
    for(uword i=0; i < H.n_rows-2; ++i)
      {
      H(span(i+2, H.n_rows-1), i).zeros();
      }
    }
  
  if(status == false)
    {
    // Decomposition failed: discard H and report the error.
    H.soft_reset();
    arma_stop_runtime_error("hess(): decomposition failed");
    }
  
  return H;
  }
Example #30
0
// Return the rows (dim == 1) or columns (dim == 2) of `a` selected by `ind`,
// in the order given. Asserts on an empty index vector, on any out-of-range
// index, and on an invalid `dim`.
Mat SubMat(const Mat &a, const std::vector<size_t> &ind, size_t dim) {
  
  mexAssert(ind.size() > 0, "In SubMat the index vector is empty");
  Mat submat;
  if (dim == 1) {
    size_t maxind = *(std::max_element(ind.begin(), ind.end()));    
    mexAssert(maxind < a.size1_, "In SubMat one of the indices is larger than the array size");    
    submat.resize(ind.size(), a.size2_);
    for (size_t i = 0; i < ind.size(); ++i) {
      // CONSISTENCY FIX: use the size2_ member directly like every other
      // access in this function instead of mixing in the size2() accessor.
      for (size_t j = 0; j < a.size2_; ++j) {
        submat(i, j) = a(ind[i], j);
      }
    }
  } else if (dim == 2) {
    size_t maxind = *(std::max_element(ind.begin(), ind.end()));    
    mexAssert(maxind < a.size2_, "In SubMat one of the indices is larger than the array size");
    submat.resize(a.size1_, ind.size());
    for (size_t i = 0; i < a.size1_; ++i) {
      for (size_t j = 0; j < ind.size(); ++j) {
        submat(i, j) = a(i, ind[j]);
      }
    }    
  } else {
    mexAssert(false, "In Mat::SubMat the second parameter must be either 1 or 2");
  }
  return submat;
}