Пример #1
0
// Opens the default camera (device 0), shows a live cropped preview in the
// "Capture" window, and leaves the last grabbed (cropped) frame in detectImg.
// The loop ends when the user presses 'c' on the console.
// In/Out: detectImg — receives the most recent cropped camera frame.
void CaptureFrame(Mat & detectImg){
	VideoCapture captureDevice;
	captureDevice.open(0);
	if (!captureDevice.isOpened()){
		cout << "Camera not found!";
		return;  // FIX: bail out early instead of nesting the whole body in an else
	}
	char kbCmd = ' ';
	// Grab one frame up front so the real frame dimensions are known.
	captureDevice >> detectImg;
	const int w = detectImg.cols, h = detectImg.rows;
	// FIX: clamp the hard-coded crop rectangle to the actual frame size;
	// the original Rect(120, 40, 400, 400) crashes on frames smaller than
	// 520x440 (e.g. 320x240 webcams).
	Rect crop(120, 40, 400, 400);
	crop &= Rect(0, 0, w, h);
	for (;;){  // original `finish` flag was never set — use an explicit infinite loop
		captureDevice >> detectImg;
		if (detectImg.empty()) break;  // camera unplugged / stream ended
		if (crop.area() > 0)
			detectImg = detectImg(crop);

		imshow("Capture", detectImg);
		waitKey(33);  // ~30 fps UI pump (C++ API instead of legacy cvWaitKey)
		// Console keyboard polling (Windows conio): 'c' confirms and exits.
		if (_kbhit()) kbCmd = _getche();
		if (kbCmd == 'c') break;
	}
	destroyAllWindows();
}
// Entry point: builds the main window and the start button, then pumps the
// HighGUI event loop until the user presses ESC.
int main(int argc, char* argv[])
{
	cout << "%%%% Road Detection Program %%%%" << endl;

	// Main application window (fixed size).
	namedWindow(nameWindowMain, CV_WINDOW_AUTOSIZE);

	// Push button whose callback kicks off the detection.
	createButton(nameBtnStart, callbackBtnStart, NULL);

	// Event pump: cvWaitKey both services GUI events and polls the keyboard;
	// 27 == ESC ends the program.
	for (;;)
	{
		if (cvWaitKey(33) == 27)
			break;
	}

	// Tear down every HighGUI window before exiting.
	destroyAllWindows();

	return 0;
}
Пример #3
0
// Prompts the user for a password, derives the encryption key from it, and
// opens the interactive "Image" window where the mouse handler (running in
// encryption mode) selects the region to encrypt.
void MainWindow::encrypt()
{
    // Tell the shared mouse handler we are encrypting (not decrypting).
    isEncryption = true;

    // Masked password prompt.
    QString text = QInputDialog::getText(this, "Password",
                     "Please enter your password for encryption",
                      QLineEdit::Password, QString());

    // Key = MD5 digest of the password bytes.
    // NOTE(review): toLocal8Bit() returns a temporary QByteArray; it lives to
    // the end of this full expression, and std::string copies it, so this is
    // safe — but text.toLocal8Bit().constData() would be clearer.
    string pwd = string((const char *)text.toLocal8Bit());
    MD5 md5(pwd);
    key = md5.getDigest();

    // Work on a copy of the source; `zone` is a same-size single-channel
    // mask, presumably marking the selected region — TODO confirm against
    // the mouseHandler implementation.
    _dst = _src.clone();
    zone = Mat::zeros(_src.size(), CV_8UC1);

    // Interactive selection: block until any key is pressed, then close.
    setMouseCallback("Image", &mouseHandler, this);
    imshow("Image", _dst);
    waitKey(0);
    destroyAllWindows();
}
Пример #4
0
// Drag-and-drop handler: loads the first dropped .png/.jpg file into _src,
// shows it in the "Image" window, and enables the encrypt/decrypt buttons.
void MainWindow::dropEvent(QDropEvent *event)
{
    QList<QUrl> droppedUrls = event->mimeData()->urls();
    // Only the first dropped file is used; assumes the drop carries at least
    // one URL — TODO confirm Qt guarantees this for accepted drops.
    QString localPath = droppedUrls[0].toLocalFile();

    if (!localPath.endsWith(".png", Qt::CaseInsensitive) && !localPath.endsWith(".jpg", Qt::CaseInsensitive)) {
        QMessageBox::critical(0, "Error", "Path: "+localPath+"\nThe filetype is not supported!!");
        return;
    }

    // NOTE(review): toAscii() returns a temporary QByteArray; .data() points
    // into it. Safe only if `filename` is a std::string (which copies) — if it
    // is a char*, this pointer dangles after this statement. Verify the type.
    filename = localPath.toAscii().data();
    destroyAllWindows();
    _src = imread(filename);
    if (!_src.data) {
        // Load failed (corrupt/unreadable file): keep previous state silently.
        return;
    }
    namedWindow("Image", CV_WINDOW_AUTOSIZE);
    imshow("Image", _src);
    // Drop any stale result from a previous image.
    _dst.release();
    ui->encryptBtn->setEnabled(true);
    ui->decryptBtn->setEnabled(true);

    event->acceptProposedAction();
}
Пример #5
0
// Classifies every blob in m_data->blobs with the coarse-to-fine ball test;
// blobs that fail are removed (and optionally erased from the foreground),
// survivors are re-numbered with consecutive ids. Always returns true.
bool BallClasificationModule::run()
{
//    this->dominantcolor();
    // Per-criterion rejection counters, reset each frame.
    n_size = 0;
    n_ecc = 0;
    n_color = 0;
    n_grassctx = 0;
    n_ball = 0;

    destroyAllWindows();
    if(!m_data->blobs.empty()) {
        Blob *blob;
        int i = 0;      // next id to assign to a surviving blob
        int cont = 0;   // index of the blob being tested (passed to coarse2fine)
        std::vector<Blob>::iterator iter = m_data->blobs.begin();
        while(iter != m_data->blobs.end()) {
            blob = &*iter;
            if( coarse2fine(blob,cont) ) {
                // Keeper: give it the next consecutive id and advance.
                blob->id = i++;
                iter++;
            } else {
                // Reject: optionally scrub it from the foreground mask, then
                // erase — erase() returns the next valid iterator, so we do
                // NOT increment here.
                if(m_delInForeground)
                    deleteBlobFromForeground(blob);
                iter = m_data->blobs.erase(iter);
            }
            cont++;
        }
    }
    else
        AppendToLog("NO HAY PELOTA");
    // Log how many blobs each filter rejected this frame.
    AppendToLog("Size: " + QString::number(n_size));
    AppendToLog("Ecc : " + QString::number(n_ecc));
    AppendToLog("Col : " + QString::number(n_color));
    AppendToLog("Ball: " + QString::number(n_ball));
    return true;
}
Пример #6
0
// Runs one capture -> foreground-segmentation -> blob-labeling -> per-blob
// vehicle detection -> tracking iteration.
// Returns -1 when the capture source yields no frame (end of stream),
// 1 otherwise.
int vehicle_det::do_iteration()
{
    //cout<<__PRETTY_FUNCTION__<<endl;
    cv::Mat img_input, src;
    cap >> src;

    if(!src.data)
    {
        printf("Exiting\n");
        return -1;
    }

    // Full-size copy for on-screen display; detection runs on the masked
    // main_roi crop only.
    Mat img_display = src.clone();
    draw_ROI_poly(img_display);
    src.copyTo(img_input, mask);
    img_input = Mat(img_input, main_roi);
    // IplImage header over img_input for the cvBlob C API below.
    // NOTE(review): `temp` shares img_input's pixel data and must outlive
    // `frame` — it does (same scope).
    IplImage temp = img_input;
    IplImage * frame = &temp;
    //getting the polygon
    // bgs->process(...) internally process and show the foreground mask image
    cv::Mat img_mask;
    //bgs->process(img_input, img_mask);
    get_foreground(img_input, img_mask);
    blur(img_mask, img_mask, Size(4, 4));
    // Binarize: keep only pixels whose foreground response exceeds 10.
    img_mask = img_mask > 10;
    /*morphologyEx(img_mask, img_mask, MORPH_CLOSE, Mat(25, 2, CV_8U));
    morphologyEx(img_mask, img_mask, MORPH_OPEN, Mat(10, 10, CV_8U));*/
    morphologyEx(img_mask, img_mask, MORPH_CLOSE, Mat(2, 2, CV_8U));
    //morphologyEx(img_mask, img_mask, MORPH_OPEN, Mat(10, 10, CV_8U));
    //morphologyEx(img_mask, img_mask, MORPH_GRADIENT , Mat(5,5, CV_8U));
    //bgs->operator()(img_input,img_mask,0.2);
    //erode(img_mask, img_mask, Mat());
    //dilate(img_mask, img_mask, Mat());
    //imshow("fore", img_mask);

    if(!img_mask.empty())
    {
        //vector<Rect> rois;// to be added all the ROIs
        // Connected-component labeling via cvBlob (C API).
        IplImage copy = img_mask;
        IplImage * new_mask = &copy;
        IplImage * labelImg = cvCreateImage(cvGetSize(new_mask), IPL_DEPTH_LABEL, 1);
        CvBlobs blobs, filtered_blobs;
        unsigned int result = cvb::cvLabel(new_mask, labelImg, blobs);
        // Drop blobs smaller than 40 px or larger than 2000 px.
        cvFilterByArea(blobs, 40, 2000);
        int count = 0;

        for(CvBlobs::const_iterator it = blobs.begin(); it != blobs.end(); ++it)
        {
            count++;
            //  cout << "Blob #" << it->second->label << ": Area=" << it->second->area << ", Centroid=(" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
            int x, y;
            x = (int)it->second->centroid.x;
            y = (int)it->second->centroid.y;
            //cv::Point2f p(x,y );
            // circle(img_input, p, (int)10, cv::Scalar(255, 0 , 0), 2, 8, 0);
            // Clamp a width_roi x height_roi window centered on the blob so
            // it stays fully inside img_input.
            int x_final = 0;
            int y_final = 0;

            if(x - (width_roi / 2) <= 0)
            {
                x_final = 1;
            }
            else if(x + (width_roi / 2) >= img_input.cols)
            {
                x_final = (x - (width_roi / 2)) - (x + (width_roi / 2) - (img_input.cols - 1));
            }
            else
            {
                x_final = x - (width_roi / 2);
            }

            if(y - (height_roi / 2) <= 0)
            {
                y_final = 1;
            }
            else if(y + (height_roi / 2) >= img_input.rows)
            {
                y_final = (y - (height_roi / 2)) - (y + (height_roi / 2) - (img_input.rows - 1));
            }
            else
            {
                y_final = y - (height_roi / 2);
            }

            //printf("resized x_final=%d y_final=%d  cols=%d,  rows=%d \n", x_final,y_final,img_input.cols,img_input.rows);
            Rect roi(x_final, y_final, width_roi, height_roi);
            //rois.push_back(roi);//adding ROIs using rectangles
            //		Mat image = imread("");
            Mat image_roi = Mat(img_input, roi);
            int vehicle_ct = detect(image_roi); //getting the vehicle count per ROI

            if(vehicle_ct > 0)
            {
                // Keep only blobs the detector confirmed as vehicles.
                filtered_blobs[it->first] = it->second;
                int matched = 0;
                int c1 = 255, c2 = 0;

                if(matched)
                {
                    c1 = 0;
                    c2 = 255;
                }
                else
                {
                    //print something to debug
                }//changing the colour of  the rectanged depending on matched or not matched

                // Draw in full-frame coordinates: min_x/min_y offset the ROI
                // crop back into img_display space.
                rectangle(img_display,
                          Point(min_x + x - 5, min_y + y - 5),
                          Point(min_x + x + 5, min_y + y + 5),
                          CV_RGB(c1, c2, 0), 2, 8, 0);
                /*rectangle(img_input,
                          Point(x - 5, y - 5),
                          Point(x + 5, y + 5),
                          CV_RGB(c1, c2, 0), 2, 8, 0);*/
            }
        }

        //cvUpdateTracks(filtered_blobs, tracks, 5., 10);
        cvUpdateTracks(filtered_blobs, tracks, 10., 5);
        cvRenderBlobs(labelImg, filtered_blobs, frame, frame, CV_BLOB_RENDER_CENTROID | CV_BLOB_RENDER_BOUNDING_BOX);
        //cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX|CV_TRACK_RENDER_TO_LOG);
        cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID);
        // NOTE(review): %d with tracks.size() (size_t) is a format mismatch;
        // should be %zu or an explicit cast.
        printf("num of active tracks %d\n", tracks.size());
        process_equation(tracks.size());//number of people given as input
	if(abstract_det::Total_Score<0){
		destroyAllWindows();	
	}
    }

    if(!img_display.empty())
    {
        cv::imshow("vehicle_view", img_display);
    }

    waitKey(30);
    
    return 1;
}
// Cleanup hook: closes every HighGUI window opened by this environment.
void Environement::cleanF(){
	destroyAllWindows();
}
Пример #8
0
/// Calculates the corner pixel locations as detected by each camera
/// In: s: board settings, includes size, square size and the number of corners
///     inputCapture1: video capture for camera 1
///     inputCapture2: video capture for camera 2
///     iterations: number of chessboard images to take
/// Out: imagePoints1: pixel coordinates of chessboard corners for camera 1
///      imagePoints2: pixel coordinates of chessboard corners for camera 2
/// Returns false if the user aborts with 's', true once `iterations` corner
/// sets have been accepted with 'y'.
/// Note: VideoCapture is passed by value, but OpenCV captures share the
/// underlying device state, so reading here advances the caller's stream too.
bool RetrieveChessboardCorners(vector<vector<Point2f> >& imagePoints1, vector<vector<Point2f> >& imagePoints2, BoardSettings s, VideoCapture inputCapture1,VideoCapture inputCapture2, int iterations)
{
    destroyAllWindows();
    Mat image1,image2;
    vector<Point2f> pointBuffer1;
    vector<Point2f> pointBuffer2;
    clock_t prevTimeStamp = 0;
    bool found1,found2;
    int count = 0;
    while (count != iterations){
        // FIX: grab and show the frames at the TOP of the loop. The original
        // read at the bottom, so a 'c' press on the very first iteration ran
        // findChessboardCorners on empty Mats.
        inputCapture1.read(image1);
        inputCapture2.read(image2);
        imshow("Image View1", image1);
        imshow("Image View2", image2);

        char c = waitKey(15);
        if (c == 's'){
            cerr << "Calibration stopped" << endl;
            return false;
        }
        // try find chessboard corners
        else if(c == 'c'){
            // ADAPTIVE_THRESH -> use adaptive thresholding to convert image to B&W
            // FAST_CHECK -> Terminates call earlier if no chessboard in image
            // NORMALIZE_IMAGE -> normalize image gamma before thresholding
            // FILTER_QUADS -> uses additional criteria to filter out false quads
            found1 = findChessboardCorners(image1, s.boardSize, pointBuffer1,
                                           CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK |
                                           CV_CALIB_CB_NORMALIZE_IMAGE | CV_CALIB_CB_FILTER_QUADS);
            found2 = findChessboardCorners(image2, s.boardSize, pointBuffer2,
                                           CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK |
                                           CV_CALIB_CB_NORMALIZE_IMAGE | CV_CALIB_CB_FILTER_QUADS);
            
            if (found1 && found2 && (pointBuffer1.size() >= s.cornerNum) && (pointBuffer2.size() >= s.cornerNum)){
                // if time delay passed refine accuracy and store
                if ((clock() - prevTimeStamp) > CAPTURE_DELAY * 1e-3*CLOCKS_PER_SEC){
                    Mat imageGray1, imageGray2;
                    cvtColor(image1, imageGray1, COLOR_BGR2GRAY);
                    cvtColor(image2, imageGray2, COLOR_BGR2GRAY);
                    
                    // refines corner locations
                    // Size(5,5) -> size of the search window (FIX: comment
                    //              previously claimed 11x11)
                    // Size(-1,-1) -> indicates no dead zone in search size
                    // TermCriteria -> max 1000 iteration, to get acuraccy of 0.01
                    cornerSubPix(imageGray1, pointBuffer1, Size(5,5), Size(-1, -1),
                                 TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01));
                    cornerSubPix(imageGray2, pointBuffer2, Size(5,5), Size(-1, -1),
                                 TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01));
                    
                    drawChessboardCorners(image1, s.boardSize, Mat(pointBuffer1), found1);
                    imshow("Image View1", image1);
                    drawChessboardCorners(image2, s.boardSize, Mat(pointBuffer2), found2);
                    imshow("Image View2", image2);
                    
                    // user verifies the correct corners have been found
                    c = waitKey(0);
                    if (c == 's'){
                        return false;
                    }
                    if (c == 'y'){
                        // store the points and store time stamp
                        imagePoints1.push_back(pointBuffer1);
                        imagePoints2.push_back(pointBuffer2);
                        prevTimeStamp = clock();
                        count++;
                        cerr << "Count: " << count << endl;
                    }
                }
            }
        }
    }
    // found all corners
    return true;
}
Пример #9
0
// ROS node "visiontracker": grabs frames from the default camera, segments a
// coloured object by HSV thresholds, and publishes its normalized position
// and area on the "visionposition" topic. SPACE pauses, 'c' quits; also exits
// when ROS shuts down or the camera stream ends.
int main( int argc, char **argv )
{
	
// %Tag(INIT)%
	ros::init(argc, argv, "visiontracker");
// %EndTag(INIT)%

// %Tag(NODEHANDLE)%
	ros::NodeHandle n;
// %EndTag(NODEHANDLE)%

// %Tag(PUBLISHER)%
	ros::Publisher chatter_pub = n.advertise<ros_umirtx_vision::VisPMsg>("visionposition", 1000);
// %EndTag(PUBLISHER)%

	
		
	int c = 0 ;
    VideoCapture cap(c); // open the default camera
    Mat frame, frameCopy, image;
    Mat rgbimg[3], tempimg, prodimg;
	Mat imgThresholded, imgHSV;
	Mat imgResult;
	
	// HSV segmentation limits as {low, high} per channel (H, S, V).
	// NOTE(review): V upper bound 264 exceeds the 8-bit max of 255 —
	// presumably intended as "no upper cut"; confirm against inRange usage.
	int minmaxhsv[3][2] = {{100,140},{85,254},{128,264}};
	int status = 1;            // control-window flag: loop runs while > 0
	int iLastXY[2] = {-1,-1};  // last detected object centre (pixels)
	double dArea = 0;          // last detected object area (pixels^2)
	int frameHeight = 480, frameWidth = 640;
	double xpos = 0.5;
	double ypos = 0.5;
    
    ros_umirtx_vision::VisPMsg msg;
    
    if(!cap.isOpened()) {
		cout << "Capture from CAM " <<  c << " didn't work" << endl;
		return -1;
	}
	
	createControlWindow("Control", minmaxhsv, &status);
	
	try{
    if(cap.isOpened())
    {
		// Prime one frame to learn the actual capture dimensions.
		cap >> frame;
		
		if( frame.empty() )
			exit(0);
		
		//frame.copyTo( frameCopy );
		flip( frame, frameCopy, -1 );  // flip both axes (camera mounted upside down?) — TODO confirm
		
		Mat imgLines = Mat::zeros( frameCopy.size(), CV_8UC3 );
		Mat imgResult= Mat::zeros( frameCopy.size(), CV_8UC3 );
		frameHeight = frame.rows;
		frameWidth = frame.cols;
		
        cout << "In capture ..." << endl;
        while((status>0) and (ros::ok()))
        {
			try{
				cap >> frame;
				if( frame.empty() )
					break;
				//frame.copyTo( frameCopy );
				flip( frame, frameCopy, -1 );
				//std::cout << "H:" << frameCopy.rows << " W:" << frameCopy.cols << std::endl;
				
				//imshow("Original",frame);
			}
			catch(int e)
			{
				cout << "Something went wrong while getting camera frame" << endl;
			}
            
            try{
				// Segment the target colour, find its centroid, and publish
				// position/area as percentages of the frame.
				selectRedObj(frameCopy, imgHSV, imgThresholded, minmaxhsv);
				getCenterOfObj(imgThresholded, imgLines, iLastXY, &dArea);
				
				msg.x = 100*((double)iLastXY[0])/frameWidth;
				msg.y = 100*(double)iLastXY[1]/frameHeight;
				msg.area = 100*dArea/frameWidth/frameHeight;
				chatter_pub.publish(msg);
				
				// Overlay the (translucent) threshold mask and the centroid
				// marker on the camera image for display.
				cvtColor(imgThresholded,imgThresholded, CV_GRAY2RGB);
				addWeighted( frameCopy, 1, imgThresholded, 0.4, 0.0, imgResult);
				circle(imgResult,Point(iLastXY[0],iLastXY[1]),5,Scalar( 0, 0, 255 ),-1);
				imgResult = imgResult + imgLines;
				
				imshow("Result",imgResult);
				//if(save>0)
				//	imwrite("/home/xavier/Pictures/saves/redobjdet-05.jpg",imgResult);
			}
			catch(int e){
				cout << "Something went wrong while processing image" << endl;
			}
			
			//save = 0;
			int key = waitKey( 10 );
			if( key > 0)
			{
				key &= 255;  // strip modifier bits reported by some backends
				cout << "Button pressed: " << key << endl;
				
				if( key == ' ' )
				{
					// Pause: keep servicing ROS callbacks until SPACE again.
					waitKey( 10 );
					key = 0;
					while( key != ' ')
					{
						ros::spinOnce();
						key = waitKey( 20 );
						if(key>=0)
							key &= 255;
					}
						
				}
				else if(key == 'c')
				{
					//ros::spinOnce();
					break;
				}
				//else if(key == 's')
				//	save = 1;
			}
            
            ros::spinOnce();
        }

        //waitKey(0);



    }
	}
	catch(int e){
		cout << "Error occured!" << endl;
	}
	
    
    destroyAllWindows();

    return 0;
}
Пример #10
0
// Live binarization explorer: shows each BGR / HSV / YCrCb channel of the
// camera feed thresholded independently, one window + trackbar per channel.
// Runs until ESC is pressed or _isShowing is cleared externally.
void BinarizationViewer::showBinarizedImgs() {
    
    Mat srcBGRImg, srcHSVImg, srcYCrCbImg;
    Mat bgrChannelImgs[3], hsvChannelImgs[3], ycrcbChannelImgs[3];

    vector<string> channelNames = {};
    // NOTE(review): all nine trackbars are bound to this single variable —
    // the actual threshold values come from the per-channel callbacks
    // (onBlueTrackbar etc.), so this int only seeds the slider position.
    int trackbarInitValue = 128;
    namedWindow("Blue", CV_WINDOW_AUTOSIZE);
    namedWindow("Green", CV_WINDOW_AUTOSIZE);
    namedWindow("Red", CV_WINDOW_AUTOSIZE);
    namedWindow("Hue", CV_WINDOW_AUTOSIZE);
    namedWindow("Saturation", CV_WINDOW_AUTOSIZE);
    namedWindow("Value", CV_WINDOW_AUTOSIZE);
    namedWindow("Y", CV_WINDOW_AUTOSIZE);
    namedWindow("Cr", CV_WINDOW_AUTOSIZE);
    namedWindow("Cb", CV_WINDOW_AUTOSIZE);

    cvCreateTrackbar("B_Threshold", "Blue", &trackbarInitValue, 255, onBlueTrackbar);
    cvCreateTrackbar("G_Threshold", "Green", &trackbarInitValue, 255, onGreenTrackbar);
    cvCreateTrackbar("R_Threshold", "Red", &trackbarInitValue, 255, onRedTrackbar);
    cvCreateTrackbar("H_Threshold", "Hue", &trackbarInitValue, 255, onHueTrackbar);
    cvCreateTrackbar("S_Threshold", "Saturation", &trackbarInitValue, 255, onSaturationTrackbar);
    cvCreateTrackbar("V_Threshold", "Value", &trackbarInitValue, 255, onValueTrackbar);
    cvCreateTrackbar("Y_Threshold", "Y", &trackbarInitValue, 255, onYTrackbar);
    cvCreateTrackbar("Cr_Threshold", "Cr", &trackbarInitValue, 255, onCrTrackbar);
    cvCreateTrackbar("Cb_Threshold", "Cb", &trackbarInitValue, 255, onCbTrackbar);

    // Force every slider to its mid-point so each callback fires once and the
    // corresponding threshold global starts at 128.
    cvSetTrackbarPos("B_Threshold", "Blue", 128);
    cvSetTrackbarPos("G_Threshold", "Green", 128);
    cvSetTrackbarPos("R_Threshold", "Red", 128);
    cvSetTrackbarPos("H_Threshold", "Hue", 128);
    cvSetTrackbarPos("S_Threshold", "Saturation", 128);
    cvSetTrackbarPos("V_Threshold", "Value", 128);
    cvSetTrackbarPos("Y_Threshold", "Y", 128);
    cvSetTrackbarPos("Cr_Threshold", "Cr", 128);
    cvSetTrackbarPos("Cb_Threshold", "Cb", 128);

    _isShowing = true;
	while(_isShowing) {
        srcBGRImg = _cameraManager.getFrame();

        // Convert once per colour space, then split into single channels.
        cvtColor(srcBGRImg, srcHSVImg, CV_BGR2HSV);
        cvtColor(srcBGRImg, srcYCrCbImg, CV_BGR2YCrCb);

        split(srcBGRImg, bgrChannelImgs);
        split(srcHSVImg, hsvChannelImgs);
        split(srcYCrCbImg, ycrcbChannelImgs);

        // Binarize each channel in place against its user-tuned threshold.
        threshold(bgrChannelImgs[0], bgrChannelImgs[0], binarizationViewerBlueThreshold, 255, CV_THRESH_BINARY);
        threshold(bgrChannelImgs[1], bgrChannelImgs[1], binarizationViewerGgreenThreshold, 255, CV_THRESH_BINARY);
        threshold(bgrChannelImgs[2], bgrChannelImgs[2], binarizationViewerRedThreshold, 255, CV_THRESH_BINARY);
        threshold(hsvChannelImgs[0], hsvChannelImgs[0], binarizationViewerHueThreshold, 255, CV_THRESH_BINARY);
        threshold(hsvChannelImgs[1], hsvChannelImgs[1], binarizationViewerSaturationThreshold, 255, CV_THRESH_BINARY);
        threshold(hsvChannelImgs[2], hsvChannelImgs[2], binarizationViewerValueThreshold, 255, CV_THRESH_BINARY);
        threshold(ycrcbChannelImgs[0], ycrcbChannelImgs[0], binarizationViewerYThreshold, 255, CV_THRESH_BINARY);
        threshold(ycrcbChannelImgs[1], ycrcbChannelImgs[1], binarizationViewerCrThreshold, 255, CV_THRESH_BINARY);
        threshold(ycrcbChannelImgs[2], ycrcbChannelImgs[2], binarizationViewerCbThreshold, 255, CV_THRESH_BINARY);
        
        imshow("src", srcBGRImg);
        imshow("Blue", bgrChannelImgs[0]);
        imshow("Green", bgrChannelImgs[1]);
        imshow("Red", bgrChannelImgs[2]);
        imshow("Hue", hsvChannelImgs[0]);
        imshow("Saturation", hsvChannelImgs[1]);
        imshow("Value", hsvChannelImgs[2]);
        imshow("Y", ycrcbChannelImgs[0]);
        imshow("Cr", ycrcbChannelImgs[1]);
        imshow("Cb", ycrcbChannelImgs[2]);

		int key = waitKey(1);
        if(key == 27) break;  // ESC quits the viewer
    }

    destroyAllWindows();
}
Пример #11
0
// Main playback loop over multiple synchronized input sequences: one window
// and one processor per input, optional matching window, keyboard control
// (ESC quit, SPACE pause/resume, 'n' step forward, 'p' step back while
// paused). Starts paused ("freezed").
void MultipleProcess::run()
{

    int FLAGS = CV_GUI_NORMAL | CV_WINDOW_AUTOSIZE;

    // One processor per input is required.
    if (_input.size() != _process.size())
        return;

    if (_showOutput)
    {
        for (size_t i = 0; i < _input.size(); i++)
        {
            namedWindow(_windowName + to_string(i), FLAGS);
        }

    }
    if (matching)
        namedWindow(_matchinName, FLAGS);
    
    
    // Wrap each processor so window mouse events are routed to it with its
    // index; wrappers must stay alive for the whole loop (Ptr keeps them).
    vector<Ptr<ProcessListenerWrapper>> wrappers;
    if (_mListener && _process.size())
    {
        for (size_t i = 0; i < _process.size(); i++)
        {
            Ptr<ProcessListenerWrapper> w = new ProcessListenerWrapper(i, _process[i], this);
            wrappers.push_back(w);
            cv::setMouseCallback(_windowName + to_string(i), MultipleProcess::mouseCallback, wrappers[i]);
        }
    }
    if (_mListener && matching)
        cv::setMouseCallback(_matchinName, Processor::mouseCallback, matching);
        
    if (!_input.size() && !_process.size())
        return;


    vector<long> frameN;          // current frame number per input
    Mat freezeFrame;
    bool freezed = true;          // start paused
    bool running = true;
    int key = Keys::NONE;

    // initialize the freezeFrame
    bool allSequencesReady = true;
    vector<Mat>  freezedFrames;   // last frame shown per input (for pause)
    vector<bool> hasFrame;        // whether input i still yields frames

    // Seek each input to _startFrame and cache the first frame.
    for (size_t i = 0; i < _input.size(); i++)
    {
        Mat tmp;
        bool _retrieved = _input[i]->getFrame(tmp, _startFrame);
        allSequencesReady = allSequencesReady && _retrieved;
        freezedFrames.push_back(tmp);
        hasFrame.push_back(_retrieved);
		long frame = 0;
		frame += _startFrame;
		frameN.push_back(frame);
    }

    while (running && allSequencesReady)
    {

        vector<Mat> frame(_input.size()), frameOut(_input.size());
        if (!freezed || key == Keys::n)
        {
            // Playing, or single-step forward while paused.
            for( size_t i = 0; i < _input.size(); i++)
            {
                hasFrame[i] = _input[i]->getFrame(frame[i], 1);
                freezedFrames[i] = frame[i];
				frameN[i]++;
            }
        }
        else if (!freezed || key == Keys::p)
        {
            // Single-step backward while paused (only reachable when freezed).
            for( size_t i = 0; i < _input.size(); i++)
            {
                if (frameN[i] > 0)
                {
                    hasFrame[i] = _input[i]->getFrame(frame[i], -1);
                    freezedFrames[i] = frame[i];
                    frameN[i]--;
                }
            }
        }
        else
        {
            // Paused with no step key: redisplay the cached frames.
            for( size_t i = 0; i < _input.size(); i++)
            {
                frame[i] = freezedFrames[i];
            }
        }

        // NOTE(review): despite its name this is true when ANY input still
        // has a frame — the loop keeps going until every sequence ends.
        bool allSequencesFinished = false;
        for (size_t i = 0; i < _input.size(); i++)
        {
            allSequencesFinished = allSequencesFinished || hasFrame[i];
        }

        if (allSequencesFinished)
        {

            for (size_t i = 0; i < _input.size(); i++)
            {

                if (_process.size())
                    _process[i]->operator()(frameN[i], frame[i], frameOut[i]);

                if (_showOutput && !frameOut[i].empty())
                    cv::imshow(_windowName + to_string(i), frameOut[i]);
                if (_output.size())
                    _output[i]->writeFrame(frameOut[i]);
            }
            if (matching)
            {
                Mat tmp, moutput;
                matching->operator()(0, tmp, moutput);
                cv::imshow(_matchinName, moutput);
            }
            key = Keys::NONE;

            try
            {
                key = waitKey(1);
            }
            catch (...)
            {
                //... waitKey may throw on some backends; treat as "no key"
            }

            if (key == Keys::ESC)
            {
                running = false;
            }
            if (key == Keys::SPACE || _pause)
            {
                // Toggle pause; _pause lets other code request a pause once.
                _pause = false;
                for (size_t i = 0; i < _input.size(); i++)
                {
                    freezedFrames[i] = frame[i];
                }
                freezed = !freezed;
            }
            if (_kListener && _process.size() && key != Keys::NONE)
            {
                // Forward the key only to the processor of the active window.
                for (size_t i = 0; i < _input.size(); i++)
                {
                    if (_activeWindow == i)
                        _process[i]->keyboardInput(key);
                }
            }
            
        }
        else
        {
            break;
        }
    }
    
    
    destroyAllWindows();
}
Пример #12
0
// Batch-tests the captcha recognition engine over every image in a folder,
// printing per-difficulty-level accuracy and average recognition time.
int _tmain(int argc, _TCHAR* argv[])
{
	if (InitEngine() < 0)
	{
		cout << "init failed" << endl;
		return -1;
	}

	string image_path = "..\\image\\download_image\\test";

	vector<string> image_list;
	get_image_list(image_path, image_list);


	// lv_cnt[i]: images recognized at difficulty level i; fail_cnt: rejected.
	int lv_cnt[5] = {0}, fail_cnt = 0;
	int lv_correct_cnt[5] = {0};
	double totalTime = 0;  // accumulated recognition ticks (converted at the end)

	for (vector<string>::iterator it = image_list.begin(); it != image_list.end(); ++it)
	{
		string img_file = image_path + "\\" + *it;

		// Unreadable files are deleted and skipped.
		Mat img = imread(img_file, 4);
		if (img.data == NULL)
		{
			DeleteFile(img_file.c_str());
			continue;
		}

		double t = (double)getTickCount();

		// code: the 4 recognized characters; conf: per-character confidence.
		char code[4] = {0};
		float conf[4] = {0};
		int ret = RecognizeCode((char*)(img_file.c_str()), code, conf);

		t = (double)getTickCount() - t;
		totalTime += t;

		if (ret >= 0)
		{
			// ret is the difficulty level (0..4) — presumably; TODO confirm
			// against the engine's documentation.
			cout << "[" << ret << "]" << "\t" << *it << endl;
			cout << "\t" << code[0] << code[1] << code[2] << code[3];
			cout << "\t" << "[" << conf[0] << " " << conf[1] << " " << conf[2] << " " << conf[3] << "]" << endl;

			// NUL-terminate the 4 recognized chars for comparison.
			char str[5];
			str[0] = code[0];str[1] = code[1];str[2] = code[2];str[3] = code[3];str[4]='\0';

			// Ground truth is embedded in the filename at offset 6, length 4
			// — assumes a fixed naming scheme; TODO confirm.
			if (it->substr(6, 4) == string(str))
				lv_correct_cnt[ret]++;
			else
			{
				//imshow("code", img);
				//waitKey(0);
				//destroyAllWindows();
			}
			lv_cnt[ret]++;
		}
		else
		{
			cout << *it << " ";
			cout << "[" << ret << "]" << "\t" << "pass." << endl;
			//MoveFile(img_file.c_str(), (image_path + "\\@\\" + *it).c_str());
			fail_cnt++;
		}

		// Show each processed image; blocks until a key is pressed.
		imshow("code", img);
		waitKey(0);
		destroyAllWindows();
	}

	cout << "FAIL: " << fail_cnt << endl;

	int total_cnt = 0, correct_cnt = 0;
	for (int i = 0; i < 5; i++)
	{
		total_cnt += lv_cnt[i];
		correct_cnt += lv_correct_cnt[i];

		// NOTE(review): prints inf/nan when lv_cnt[i] == 0 (float div by 0).
		cout << "Lv[" << i << "]: " << lv_correct_cnt[i] << "/" << lv_cnt[i] << " = " << lv_correct_cnt[i]/(float)lv_cnt[i] << endl;
	}
	total_cnt += fail_cnt;

	cout << "TOTAL: " << correct_cnt << "/" << total_cnt << " = " << correct_cnt/(float)total_cnt << endl;
	cout << "Time : " << totalTime/(double)getTickFrequency()*1000. << "/" << total_cnt << " " << 
		totalTime/(double)getTickFrequency()*1000./total_cnt << endl;

	if (ReleaseEngine() < 0)
		cout << "release failed" << endl;

	return 0;
}
Пример #13
0
// Initialization thread: shows the video feed (drone stream via ffmpeg, or a
// local webcam), lets the user drag a rectangle around the target, tune HSV
// thresholds on the selection, then extracts SURF keypoints/descriptors of
// the selection into the global `Ball` before handing off to tracking.
// In: arg — unused pthread argument.
// Returns NULL (also returns early, with 0/NULL, on camera or codec errors).
void *drawingAndParam(void * arg)
{
	string winParametrage = "Thresholded";
	string winDetected = "Parametrages";
	char key = 0;	// FIX: was uninitialized; it is read below in `while(key != 'q')` before first assignment (UB)
	drawing = false;
	onDrawing = true;
	pthread_mutex_init(&mutexVideo, NULL);
#if output_video == ov_remote_ffmpeg
	// Open the drone's TCP video stream and set up the ffmpeg decode path.
	int errorcode = avformat_open_input(&pFormatCtx, "tcp://192.168.1.1:5555", NULL, NULL);
	if (errorcode < 0) {
		cout << "ERREUR CAMERA DRONE!!!" << errorcode;
		return 0;
	}
	avformat_find_stream_info(pFormatCtx, NULL);
	av_dump_format(pFormatCtx, 0, "tcp://192.168.1.1:5555", 0);
	pCodecCtx = pFormatCtx->streams[0]->codec;
	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		cout << "ERREUR avcodec_find_decoder!!!";
		return 0;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		cout << "ERREUR avcodec_open2!!!";
		return 0;
	}
	//pFrame = av_frame_alloc();
	//pFrameBGR = av_frame_alloc();
	pFrame = avcodec_alloc_frame();
	pFrameBGR = avcodec_alloc_frame();
	bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));
	avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
	pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
	// 368-high streams are trimmed to 360 (row padding from the decoder).
	img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
	if (!img) {
		cout << "ERREUR PAS D'IMAGE!!!";
		return 0;
	}

	// Background thread that keeps pFrameBGR updated from the stream.
	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);

#else	
	VideoCapture cap(0); // capture video from the webcam

#endif
	HH=179;LS=1;HS=255;LV=1;HV=255;LH=1;
	namedWindow(winDetected, CV_WINDOW_NORMAL);
	Mat frame;
	setMouseCallback(winDetected, MouseCallBack, NULL);
	while(true)
	{	
		if(onDrawing) // While the user has not started the selection
		{
			#if output_video != ov_remote_ffmpeg
				bool bSuccess = cap.read(frame); // grab a new frame
			if (!bSuccess) {
				cout << "Impossible de lire le flux video" << endl;
				break;
			}
			#else
				// Copy the latest decoded BGR frame under the mutex shared
				// with the decoder thread.
				pthread_mutex_lock(&mutexVideo);
				memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
				pthread_mutex_unlock(&mutexVideo);
				frame = cv::cvarrToMat(img, true);
			#endif
		imshow(winDetected, frame);
		}
		if(!onDrawing && !drawing) // Live-preview the user's selection rectangle
		{
			Mat tmpFrame=frame.clone();
			rectangle(tmpFrame, rec, CV_RGB(51,156,204),1,8,0);
			imshow(winDetected, tmpFrame);
		}
		if(drawing) // The user finished selecting
		{
			//cible Ball(1);
			namedWindow(winParametrage, CV_WINDOW_NORMAL);
			setMouseCallback(winDetected, NULL, NULL);	
			rectangle(frame, rec, CV_RGB(51,156,204),2,8,0);
			imshow(winDetected, frame);
			Mat selection = frame(rec);
			Ball.setPicture(selection);
			// HSV tuning loop on the selection; 'q' accepts the settings.
			while(key != 'q')
			{
				// Trackbars for the hue range
				createTrackbar("LowH", winParametrage, &LH, 179); //Hue (0 - 179)
				createTrackbar("HighH", winParametrage, &HH, 179);
				// Trackbars for saturation (vs. white)
				createTrackbar("LowS", winParametrage, &LS, 255); //Saturation (0 - 255)
				createTrackbar("HighS", winParametrage, &HS, 255);
				// Trackbars for value/brightness (vs. black)
				createTrackbar("LowV", winParametrage, &LV, 255);//Value (0 - 255)
				createTrackbar("HighV", winParametrage, &HV, 255);
				Mat imgHSV;

				cvtColor(selection, imgHSV, COLOR_BGR2HSV); // BGR -> HSV

				Mat imgDetection;

				inRange(imgHSV, Scalar(LH, LS, LV), Scalar(HH, HS, HV), imgDetection); // black out pixels outside the user-chosen colour interval

				// Remove noise: open then close with an elliptic kernel.
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				imshow(winParametrage, imgDetection);

				// "Distance" to the target (zeroth moment of the mask);
				// later used as a threshold.
				Moments position;
				position = moments(imgDetection);
				Ball.lastdZone = position.m00;

				key = waitKey(10);
			}
			
			// Extract interest points from the user's selection.
			Mat graySelect;
			int minHessian = 800;
			cvtColor(selection, graySelect, COLOR_BGR2GRAY);
			Ptr<SURF> detector = SURF::create(minHessian);
			vector<KeyPoint> KP;
			detector->detect(graySelect, KP);
			Mat KPimg;
			drawKeypoints(graySelect, KP, KPimg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
			Mat desc;
			Ptr<SURF> extractor = SURF::create();
			extractor->compute(graySelect, KP, desc);
			Ball.setimgGray(graySelect);
			Ball.setKP(KP);
			Ball.setDesc(desc);
			break;
		}
		key = waitKey(10);
	}
	// Initialization done: close all windows and move on to tracking.
	destroyAllWindows();
#if output_video != ov_remote_ffmpeg
	cap.release();
#endif
	return NULL;  // FIX: the original fell off the end of a non-void function (UB)
}
Пример #14
0
/**
 * Main capture/processing loop: grabs frames, thresholds and erodes them,
 * detects blobs, identifies the retro-reflective tape by aspect ratio and
 * overlays width/height/rotation/distance text, until the user quits with
 * q/Q/Esc. Settings are persisted to the preference file on exit.
 *
 * @param capture  opened video source to read frames from
 * @return 0 on normal termination
 */
int process(VideoCapture& capture) {
    long captureTime;
    cout << "Press q or escape to quit!" << endl;

    namedWindow(VIDEO_WINDOW_NAME, CV_WINDOW_AUTOSIZE);
    namedWindow(ERODE_PREVIEW_WIN_NAME, CV_WINDOW_NORMAL);
    resizeWindow(ERODE_PREVIEW_WIN_NAME, 320, 240);
    ControlsWindow* controlsWindow = new ControlsWindow();

    if(fileExists(preferenceFileName)) {
        loadSettings(controlsWindow, (char*)preferenceFileName);
    }

    Mat frame;
    bool quit = false;  // set by q/Q/Esc so the loop exits through one cleanup path
    while (!quit) {
        capture >> frame;
        // NOTE: the int cast truncates to whole seconds before scaling to ms;
        // only consumed by the commented-out frame-pacing code below.
        captureTime = (int)(getTickCount()/getTickFrequency())*1000;

        if (frame.empty())
            break;

        // Downscale to a fixed width, preserving the capture aspect ratio.
        int target_width = 320;
        int height = (target_width/capture.get(3 /*width*/)) * capture.get(4 /*height*/);
        resize(frame, frame, Size(target_width, height));

        if (controlsWindow->getBlurDeviation() > 0) {
            GaussianBlur(frame, frame, Size(GAUSSIAN_KERNEL, GAUSSIAN_KERNEL), controlsWindow->getBlurDeviation());
        }

        //Apply brightness and contrast
        frame.convertTo(frame, -1, controlsWindow->getContrast(), controlsWindow->getBrightness());

        Mat maskedImage = thresholdImage(controlsWindow, frame);

        Mat erodedImage = erodeDilate(maskedImage, controlsWindow);

        Mat erodedImageBinary;

        cvtColor(erodedImage, erodedImageBinary, COLOR_BGR2GRAY);
        threshold(erodedImageBinary, erodedImageBinary, 0, 255, CV_THRESH_BINARY);

        if(controlsWindow->getInvert()) {
            erodedImageBinary = 255 - erodedImageBinary;
        }

        // Blob detector tuned to keep only reasonably large bright regions.
        cv::SimpleBlobDetector::Params params;
        params.minDistBetweenBlobs = 50.0f;
        params.filterByInertia = false;
        params.filterByConvexity = false;
        params.filterByColor = true;
        params.filterByCircularity = false;
        params.filterByArea = true;
        params.minArea = 1000.0f;
        params.maxArea = 100000.0f;
        params.blobColor = 255;

        vector<KeyPoint> centers;
        vector<vector<Point>> contours;
        ModBlobDetector* blobDetector = new ModBlobDetector(params);

        vector<vector<Point>> contourHulls;
        vector<RotatedRect> contourRects;
        blobDetector->findBlobs(erodedImageBinary, erodedImageBinary, centers, contours);
        // Const references avoid copying each contour per iteration.
        for(const vector<Point>& ctpts : contours) {
            vector<Point> hull;
            convexHull(ctpts, hull);
            contourHulls.push_back(hull);
            contourRects.push_back(minAreaRect(hull));
        }
#ifdef DEBUG_BLOBS
        drawContours(frame, contours, -1, Scalar(128,255,128), 2, CV_AA);
        drawContours(frame, contourHulls, -1, Scalar(255, 128,0), 2, CV_AA);
        int ptnum = 0;  // was read uninitialized before
        for(const KeyPoint& pt : centers) {
            Scalar color(255, 0, 255);
            circle(frame, pt.pt, 5
                   , color, -1 /*filled*/, CV_AA);
            circle(frame, pt.pt, pt.size, color, 1, CV_AA);
            ptnum++;
        }
#endif
        for(const RotatedRect& rr : contourRects) {
            Point2f points[4];
            rr.points(points);
            float side1 = distance(points[0], points[1]);
            float side2 = distance(points[1], points[2]);

            float shortestSide = min(side1, side2);
            float longestSide = max(side1, side2);
            float aspectRatio = longestSide/shortestSide;
            int b = 0;
            // A candidate is "tape" when its aspect ratio is within 20% of the
            // reference ratio loaded from object.xml (0 means no reference).
            bool isTape = objInfo.aspectRatio == 0 ? false :
                          abs(objInfo.aspectRatio - aspectRatio) < 0.2*objInfo.aspectRatio;
            /*
             * TODO
             * Make a list of possible tape candidates
             * Use tape candidate with smallest difference in ratio to the real ratio as the tape
             */
            if(isTape) {
                b = 255;
                string widthText = "Width (px): ";
                widthText.append(toString(longestSide));
                string heightText = "Height (px): ";
                heightText.append(toString(shortestSide));
                string rotText = "Rotation (deg): ";
                rotText.append(toString(abs((int)rr.angle)));
                string distText;
                if(camSettings.focalLength == -1) {
                    distText = "Focal length not defined";
                } else {
                    // Pinhole model: distance = real_width * focal / pixel_width.
                    float dist = objInfo.width * camSettings.focalLength / longestSide;
                    distText = "Distance (cm): ";
                    distText.append(toString(dist));
                }
                putText(frame, widthText, Point(0, 20), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
                putText(frame, heightText, Point(0, 40), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
                putText(frame, rotText, Point(0, 60), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
                putText(frame, distText, Point(0, 80), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
            }

            rotated_rect(frame, rr, Scalar(b, 0, 255));
            if(isTape)break;
        }
        if(objInfo.aspectRatio == 0) {
            putText(frame, "Invalid object info (object.xml)", Point(0, 20), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
        }
        delete blobDetector;

        imshow(ERODE_PREVIEW_WIN_NAME, erodedImageBinary);

        imshow(VIDEO_WINDOW_NAME, frame);

        //int waitTime = max((int)(((1.0/framerate)*1000)
        //                   - ((int)(getTickCount()/getTickFrequency())*1000 - captureTime))
        //                   , 1);
        char key = (char)waitKey(1);
        switch (key) {
        case 'q':
        case 'Q':
        case 27: //escape
            // Previously returned here, leaking controlsWindow and leaving the
            // windows open; now fall through to the shared cleanup below.
            quit = true;
            break;
        default:
            break;
        }
        std::this_thread::yield();
    }

    saveSettings(controlsWindow, (char*)preferenceFileName);
    delete controlsWindow;

    destroyAllWindows();
    return 0;
}
// Stitches the latest camera images from the three UAVs into one panorama.
// Each input is halved in size first; on success the result is shown in an
// OpenCV window, on failure (not enough overlapping features) opt_pose is
// set and uav_int_pose is raised so the UAVs climb and try again.
// NOTE(review): reads members uav1_cam_img/uav2_cam_img/uav3_cam_img and
// uavpose_z without synchronization — confirm the image callbacks cannot
// run concurrently with this method.
void AerialCameraStitch::stitchCameraImages(){
	cv::Stitcher imgstitcher = Stitcher::createDefault(true);
	vector< Mat > tmp_uav_cam_img;
		Mat out_stitch_img;

		Mat tmp_img1;
		Mat tmp_img2;
		Mat tmp_img3;

		Mat tmp_img1_;
		Mat tmp_img2_;
		Mat tmp_img3_;

		opt_pose = false;

		ROS_INFO_STREAM("STITCHING STARTED");

		try{
			destroyAllWindows();
			// Snapshot the current camera frames (Mat assignment shares data,
			// no deep copy is made here).
			tmp_img1 = uav1_cam_img;
			tmp_img2 = uav2_cam_img;
			tmp_img3 = uav3_cam_img;


			// Halve each image to keep stitching time manageable.
			cv::resize(tmp_img1, tmp_img1_, cv::Size2i(tmp_img1.cols/2, tmp_img1.rows/2));
			cv::resize(tmp_img2, tmp_img2_, cv::Size2i(tmp_img2.cols/2, tmp_img2.rows/2));
			cv::resize(tmp_img3, tmp_img3_, cv::Size2i(tmp_img3.cols/2, tmp_img3.rows/2));

			tmp_img1 = tmp_img1_;
			tmp_img2 = tmp_img2_;
			tmp_img3 = tmp_img3_;

			tmp_uav_cam_img.push_back(tmp_img1);
			tmp_uav_cam_img.push_back(tmp_img2);
			tmp_uav_cam_img.push_back(tmp_img3);


			ROS_INFO_STREAM("UAV 1 : "<<tmp_uav_cam_img[0].size());
			ROS_INFO_STREAM("UAV 2 : "<<tmp_uav_cam_img[1].size());
			ROS_INFO_STREAM("UAV 3 : "<<tmp_uav_cam_img[2].size());


			unsigned long AAtime=0, BBtime=0; //check processing time
			AAtime = getTickCount(); //check processing time

			Stitcher::Status status = imgstitcher.stitch(tmp_uav_cam_img, out_stitch_img);


			 BBtime = getTickCount(); //check processing time
			 printf("Time Taken: %.2lf sec \n",  (BBtime - AAtime)/getTickFrequency() ); //check processing time


			cv::Size out_size =  out_stitch_img.size();
			int out_row = out_size.height;
			int out_cols = out_size.width;

			 // A 1x1 output is treated the same as an explicit failure status.
			 if (status != Stitcher::OK || (out_row == 1 && out_cols == 1))
				 {
				 	 ROS_INFO_STREAM("IMAGES DIDNT STITCH - Not Enough Common Features Detected : "<< out_stitch_img.size());

				 	 ROS_INFO_STREAM("Increasing height to optimum level, if possible");

				 	 // Request a climb of 5 units so the views overlap more.
				 	 opt_pose = true;
				 	 uav_int_pose = uavpose_z + 5;

				 	 ROS_INFO_STREAM("Optimal Height : " <<  uav_int_pose);
				 	 ROS_INFO_STREAM("Translating ... ");

				 }else{
					 ROS_INFO_STREAM("STITCHING RESULT: "<< out_size);
					 ROS_INFO_STREAM("SUCCESSFUL ");

					 namedWindow("Stitching Result", WINDOW_AUTOSIZE);
					 imshow("Stitching Result", out_stitch_img);

				 }
		}catch(cv::Exception& e){
			const char* err_msg = e.what();
			ROS_INFO_STREAM("SOMETHING WENT WRONG!!!");
			ROS_INFO_STREAM("exception caught : " << err_msg);

		}
		 // Blocks until a key is pressed in the result window.
		 waitKey(0);

		 out_stitch_img.release();
		 tmp_img1.release();
		 tmp_img2.release();
		 tmp_img3.release();

		 tmp_img1_.release();
		 tmp_img2_.release();
		 tmp_img3_.release();


		 tmp_uav_cam_img[0].release();
		 tmp_uav_cam_img[1].release();
		 tmp_uav_cam_img[2].release();



}
Пример #16
0
/**
 * Displays maximum-intensity projections of an image stack together with
 * several (currently disabled) post-processing variants.
 *
 * NOTE(review): most of the pipeline below is commented out, so img_max and
 * img_gauss_max stay all-zero and img_thresh stays empty. The preview
 * imshow calls are now guarded, because cv::imshow throws on an empty Mat.
 *
 * @param data  raw stack, nt frames of n*m bytes each (element size assumed
 *              1 byte here — TODO confirm against callers / imgtype)
 * @param n     frame height in pixels
 * @param m     frame width in pixels
 * @param nt    number of frames in the stack
 * @return 0 always
 */
int showImages (unsigned char *data, int n, int m, int nt)
{
	Mat img_max_gauss, img_gauss_max, img_max_bm3d, img_max;
	Mat imgc_max_gauss, imgc_gauss_max, imgc_max_bm3d, imgc_max;

	Mat img_max_gauss_canny, img_gauss_max_canny, img_max_bm3d_canny, img_max_canny;
	Mat imgc_max_gauss_canny, imgc_gauss_max_canny, imgc_max_bm3d_canny, imgc_max_canny;

	Mat img_max_gauss_watershed, img_gauss_max_watershed, img_max_bm3d_watershed, img_max_watershed;

	img_max.create (n, m, CV_8UC1);
	img_max.setTo (0);
	img_gauss_max.create (n, m, CV_8UC1);
	img_gauss_max.setTo (0);

	// Per-frame projection is currently disabled; the loop is kept as a
	// placeholder for the commented-out code below.
	for (int i = 0; i < nt; i++) {

		/*Mat img (n, m, imgtype, data + i * n * m), grayImg;
		img = min (img, 400);
		double minVal, maxVal;
		minMaxLoc (img, &minVal, &maxVal); //find minimum and maximum intensities
		img.convertTo (img, CV_8U, 255.0 / (maxVal - minVal), -minVal * 255.0 / (maxVal - minVal));

		img_max = max (img, img_max);
		GaussianBlur (img, img, cv::Size (3, 3), 1.0, 1.0);
		img_gauss_max = max (img, img_gauss_max);*/
	}

	GaussianBlur (img_max, img_max_gauss, cv::Size (3, 3), 1.0, 1.0);

	// False-colour versions for display (all-black while the loop above is
	// disabled).
	applyColorMap (img_max, imgc_max, COLORMAP_JET);
	applyColorMap (img_max_gauss, imgc_max_gauss, COLORMAP_JET);
	applyColorMap (img_gauss_max, imgc_gauss_max, COLORMAP_JET);

	//while (true) {
	//scanf ("%lf %lf", &lowThreshold, &uppThreshold);

	//do_canny (img_max, img_max_canny);
	//do_canny (img_gauss_max, img_gauss_max_canny);
	/*img_max_watershed.create (n, m, CV_32SC1);
	img_max_watershed.setTo (0);*/
	//ex_watershed (imgc_max);
	//watershed (img_gauss_max, img_gauss_max_watershed);

	//GaussianBlur (img_max, img_max_gauss, cv::Size (9, 9), 1.0, 1.0);
	//do_canny (img_max_gauss, img_max_gauss_canny);
	//watershed (img_max_gauss, img_max_gauss_watershed);
	/*

	imshow ("0. bm3d 1. max 2. jet", imgc_max);


	imshow ("0. bm3d 1. max 2. gauss: 3. jet", imgc_max_gauss);


	imshow ("0. bm3d 1. gauss 2. max: ", imgc_gauss_max);
	*/

	// Canny view
	/*
	imshow ("1. max 2. canny", img_max_canny);
	imshow ("1. max 2. gauss 3. canny", img_max_gauss_canny);
	imshow ("1. gauss 2. max 3. canny ", img_gauss_max_canny);
	*/
	/*double minVal, maxVal;
	img_max_watershed += 1;
	minMaxLoc (img_max_watershed, &minVal, &maxVal);
	Mat imgc_watershed;
	img_max_watershed.convertTo (imgc_watershed, CV_8U, 255.0 / (maxVal - minVal), -minVal * 255.0 / (maxVal - minVal));*/
	//applyColorMap (imgc_watershed, imgc_watershed, COLORMAP_JET);

	// Watershed view
	//imshow ("1. max 2. watershed", imgc_watershed);
	/*imshow ("1. max 2. gauss 3. watershed", img_max_gauss_watershed);
	imshow ("1. gauss 2. max 3. watershed ", img_gauss_max_watershed);*/

	Mat img_thresh;
	//threshold (img_max, img_thresh, threshold_value, threshold_max_value, THRESH_TOZERO | CV_THRESH_OTSU);
	//imshow ("some", img_max);
	// img_thresh is only populated by the commented-out threshold calls, so
	// guard each preview — imshow on an empty Mat throws cv::Exception.
	if (!img_thresh.empty ())
		imshow ("max thr", img_thresh);
	//threshold (img_max_gauss, img_thresh, 0, threshold_max_value, THRESH_TOZERO | CV_THRESH_OTSU);
	if (!img_thresh.empty ())
		imshow ("max gauss thr", img_thresh);
	//threshold (img_gauss_max, img_thresh, 0, threshold_max_value, THRESH_TOZERO | CV_THRESH_OTSU);
	if (!img_thresh.empty ())
		imshow ("gauss max thr", img_thresh);
	/*
	imwrite (path + "\\" + "1_max__" + file_name + ".bmp", img_max);
	imwrite (path + "\\" + "2_max_gauss__" + file_name + ".bmp", img_max_gauss);
	imwrite (path + "\\" + "3_gauss_max__" + file_name + ".bmp", img_gauss_max);

	imwrite (path + "\\" + "1_max_jet__" + file_name + ".bmp", imgc_max);
	imwrite (path + "\\" + "2_max_gauss_jet__" + file_name + ".bmp", imgc_max_gauss);
	imwrite (path + "\\" + "3_gauss_max_jet__" + file_name + ".bmp", imgc_gauss_max);

	Mat img_max_thr, img_max_gauss_thr, img_gauss_max_thr;

	my_threshold (img_max, img_max_thr);
	my_threshold (img_max_gauss, img_max_gauss_thr);
	my_threshold (img_gauss_max, img_gauss_max_thr);

	imwrite (path + "\\" + "1_max_thr__" + file_name + ".bmp", img_max_thr);
	imwrite (path + "\\" + "2_max_gauss_thr__" + file_name + ".bmp", img_max_gauss_thr);
	imwrite (path + "\\" + "3_gauss_max_thr__" + file_name + ".bmp", img_gauss_max_thr);
	*/

	// Wait for a key press (returns immediately when no window is open).
	waitKey ();

	destroyAllWindows ();
	return 0;
}
Пример #17
0
int show_flow (unsigned char *data, int n, int m, int nt, int img_type, int type_size)
{
	Point2f point;
	bool addRemovePt = false;

	VideoCapture cap;
	TermCriteria termcrit (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
	Size subPixWinSize (10, 10), winSize (31, 31);

	const int MAX_COUNT = 300;
	bool needToInit = false;
	bool nightMode = false;

	Mat gray, prevGray, image;
	vector<Point2f> points[2];
	Mat img_max_gauss, img_gauss_max, img_max;
	Mat imgc_max_gauss, imgc_gauss_max, imgc_max, img_gauss_max_thr;

	img_max.create (n, m, CV_8UC1);
	img_max.setTo (0);
	img_gauss_max.create (n, m, CV_8UC1);
	img_gauss_max.setTo (0);

	for (int i = 0; i < nt; i++) {
		Mat img (n, m, img_type, data + i * n * m * type_size);
		img = min (img, 400);
		double minVal, maxVal;
		minMaxLoc (img, &minVal, &maxVal); //find minimum and maximum intensities
		img.convertTo (img, CV_8U, 255.0 / (maxVal - minVal), -minVal * 255.0 / (maxVal - minVal));

		img_max = max (img, img_max);
		GaussianBlur (img, img, cv::Size (3, 3), 1.0, 1.0);
		img_gauss_max = max (img, img_gauss_max);
	}

	threshold (img_gauss_max, img_gauss_max_thr, 0, 255, THRESH_BINARY | CV_THRESH_OTSU);
	Mat img_gauss_max_thr_res;
	//resize (img_gauss_max_thr, img_gauss_max_thr_res, cv::Size (img_gauss_max_thr.cols * 3, img_gauss_max_thr.rows * 3), 0, 0, 0);
	Mat prev_gray, prev_img;
	//int x = (int)string (matfilename).find ('.');
	//string file_name = string (matfilename).substr (0, x);
	//string path = matfiledir + "\\" + file_name + "\\flow";
	//imwrite (path + "\\img_big\\img_gauss_max_thr_res" + +".bmp", img_gauss_max_thr_res);
	for (int i = 0; i < nt; i++) {
		printf ("i : %d\n", i);
		/*Mat frame (n, m, imgtype, data + i * n * m * type_size);
		frame = min (frame, 100);
		double minVal, maxVal;
		minMaxLoc (frame, &minVal, &maxVal);
		frame.convertTo (frame, CV_8U, 255.0 / (maxVal - minVal), -minVal * 255.0 / (maxVal - minVal));

		GaussianBlur (frame, frame, cv::Size (3, 3), 1.0, 1.0);
		//Mat res_img;
		//resize (frame, res_img, cv::Size (frame.cols * 3, frame.rows * 3));
		//res_img.copyTo (frame);
		Mat img, image;
		applyColorMap (frame, img, COLORMAP_JET);
		
		img.copyTo (image, img_gauss_max_thr);*/
		//frame.copyTo (gray, img_gauss_max_thr);
		/*Mat img;
		bitwise_and (image, img_gauss_max_thr, img);
		image = img;*/
		//cvtColor (image, gray, COLOR_BGR2GRAY);
		Mat flow, cflow;
		/*if (prev_gray.data != nullptr) {
		calcOpticalFlowFarneback (prev_gray, gray, flow, 0.5, 5, 10, 1, 5, 1.2, 0);
		//cvtColor (prev_img, cflow, COLOR_GRAY2BGR);
		prev_img.copyTo(cflow);
		draw_optical_flow (flow, cflow, 4, Scalar (0, 100, 0), Scalar (0, 0, 255));
		//imshow ("Flow", cflow);
		//imshow ("gray", gray);
		//imshow ("prev_gray", prev_gray);
		char c = (char)waitKey (200);
		}

		gray.copyTo (prev_gray);
		image.copyTo (prev_img);*/

		//char c = (char)waitKey (200);
		/*
		if (nightMode)
		image = Scalar::all (0);

		if (needToInit) {
		// automatic initialization
		goodFeaturesToTrack (gray, points[0], MAX_COUNT, 0.01, 10, Mat (), 3, 0, 0.04);
		cornerSubPix (gray, points[0], subPixWinSize, Size (-1, -1), termcrit);
		addRemovePt = false;
		} else if (!points[0].empty ()) {
		vector<uchar> status;
		vector<float> err;
		if (prevGray.empty ())
		gray.copyTo (prevGray);
		calcOpticalFlowPyrLK (prevGray, gray, points[0], points[1], status, err);
		//, winSize,3, termcrit, 0, 0.001);
		size_t i, k;
		for (i = k = 0; i < points[1].size (); i++) {
		if (!status[i])
		continue;

		points[1][k++] = points[1][i];
		if (points->size() > i) {
		line (image, points[1][i], points[0][i], Scalar(255, 255, 255), 2);
		}
		circle (image, points[1][i], 2, Scalar (0, 0, 0), -1, 8);
		}
		points[1].resize (k);
		}

		if (addRemovePt && points[1].size () < (size_t)MAX_COUNT) {
		vector<Point2f> tmp;
		tmp.push_back (point);
		cornerSubPix (gray, tmp, winSize, cvSize (-1, -1), termcrit);
		points[1].push_back (tmp[0]);
		addRemovePt = false;
		}

		needToInit = false;
		imshow ("LK Demo", image);
		if (i == 0) char c = (char)waitKey ();*/
		//imwrite (path + "\\img_big\\" + std::to_string (i) + ".bmp", image);
		//imwrite (path + "\\img_\\" + std::to_string (i) + ".bmp", img);
		/*char c = (char)waitKey (0);
		if (c == 27)
		break;
		switch (c) {
		case 'r':
		needToInit = true;
		break;
		case 'c':
		points[0].clear ();
		points[1].clear ();
		break;\
		case 'n':
		nightMode = !nightMode;
		break;
		case 's':
		imwrite (path + "\\" + std::to_string (i) + ".bmp", prev_img);
		}*/
	}
	destroyAllWindows ();
	return 0;
}
Пример #18
0
// Camera/tracking thread: grabs frames (webcam or remote ffmpeg stream),
// runs the two trackers (template matching + colour tracking) on each frame,
// converts the fused target position into drone roll/pitch/gaz/yaw commands
// and sends them via AtCmd. Returns when any key other than Enter is pressed.
// Enter (key code 10) arms flight mode (enVol) instead of exiting.
void* camera(void* arg) {
	//pFormatCtx=(AVFormatContext *)arg;
	char key;
	drawing=false;
	Ball.roll = Ball.pitch = Ball.gaz = Ball.yaw = 0;
	pthread_mutex_init(&mutexVideo, NULL);
	liste.suivant=NULL;
#if output_video == ov_remote_ffmpeg
	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);
#else	
	VideoCapture cap(0); // capture video from the webcam
#endif



#if output_video != ov_remote_ffmpeg

	if (!cap.isOpened()) {
		cout << "Impossible de lire le flux de la camera" << endl;
		return NULL;
	}
	Mat frame;
	cap >> frame;
	fSize.width = frame.cols;
	fSize.height = frame.rows;
#endif

	// Initialise the display windows
	namedWindow(winDetected, 1);
	namedWindow(winOutputVideo, 1);

	// Create a black image the size of the captured frame (overlay canvas)
	Mat imgLines = Mat::zeros(fSize.height, fSize.width, CV_8UC3);

	while (true) {

#if output_video != ov_remote_ffmpeg
		bool bSuccess = cap.read(imgOriginal); // grab a new frame
		if (!bSuccess) {
			cout << "Impossible de lire le flux video" << endl;
			break;
		}
#else
		// Copy the latest decoded ffmpeg frame under the video mutex.
		pthread_mutex_lock(&mutexVideo);
		memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
		pthread_mutex_unlock(&mutexVideo);
		imgOriginal = cv::cvarrToMat(img, true);
#endif
		pthread_t mtId,ocId;
		// Launch the two tracking threads (template matching and colour tracking)
		pthread_create(&mtId, NULL, &matchTemplate, NULL);
		pthread_create(&ocId, NULL, &opencv, NULL);
		
		pthread_join(mtId,NULL);
		pthread_join(ocId,NULL);

		// Combine the results of the two trackers into one position estimate
		Ball.setRealPos();

		// Draw the reference overlay: image centre and tracked position
		imgLines.setTo(Scalar(255, 255, 255));
		drawCross(imgLines, fSize.width / 2, fSize.height / 2, Scalar(0, 0, 255));
		drawCross(imgLines, posX, posY, Scalar(0, 255, 0));

		imgOriginal = imgOriginal & imgLines; // merge the overlay into the output frame

		// Display the windows
		imshow(winDetected, imgDetection);			// show the masked detection image
		//imshow(winRepere, imgLines);				// show the reference overlay window
		imshow(winOutputVideo, imgOriginal);		// original image
		string Action = "Mouvement a effectuer : ";
		ObjCoord tmp = Ball.getRealPos();
		cout << "x " << tmp.Xcoord << " y " << tmp.Ycoord << " z " << tmp.Zcoord << endl;
		/*
		if(tmp.Zcoord == -1){
			Action += "Recule, "; Ball.pitch = 0.05f;
		}
		else if(tmp.Zcoord == 1){
			Action += "Avance, "; Ball.pitch = -0.05f;
		}
		else
		{
			Ball.pitch = 0;
		}
		*/
		// Target left/right of centre band [35,65] -> yaw toward it.
		if (tmp.Xcoord <= 35.0 && tmp.Xcoord != 0) {
			Ball.yaw = -0.2f;
			Action += "Gauche ("+ to_string(Ball.yaw)+"%), ";
		} else if (tmp.Xcoord >= 65.0) {
			Ball.yaw = 0.2f;
			Action += "Droite ("+ to_string(Ball.yaw)+"%), ";
		}
		else
		{
			Ball.yaw = 0;	
		}
		// Target above/below the centre band -> climb or descend.
		if (tmp.Ycoord >= 65.0) {
			Action += "Descendre";  Ball.gaz = -0.2f;
		} else if (tmp.Ycoord <= 35.0 && tmp.Ycoord != 0) {
			Action += "Monter";    Ball.gaz = 0.2f;
		}
		else
		{
			Ball.gaz = 0;
		}
		/*if(Ball.pitch != 0) {
			Ball.roll = Ball.yaw / 2;
			Ball.yaw = 0;
		}*/
		// (0,0,0) means the target was lost: stop all movement.
		if(tmp.Xcoord == 0 && tmp.Ycoord == 0 && tmp.Zcoord == 0)
		{
			Ball.roll = Ball.pitch = Ball.gaz = Ball.yaw = 0;
			
		}
		if(Ball.pitch == 0)
			AtCmd::sendMovement(0, Ball.roll, Ball.pitch, Ball.gaz, Ball.yaw);
		else
			AtCmd::sendMovement(3, Ball.roll, Ball.pitch, Ball.gaz, Ball.yaw);
		//cout << Action << endl;
		key=waitKey(10);
		if(key == 10)	// Enter: arm flight mode and keep looping
		{
			enVol=true;
			key=-1;
		}
		else if (key != -1) // any other key exits the loop
		{
			break;
		}
	}
	stopTracking=true;
	destroyAllWindows();
	return NULL;
}
Пример #19
0
/**
 * Decrypts the secret image embedded at the end of the currently loaded
 * image file and composites it back onto the cover image.
 *
 * Trailing layout: [mask image bytes][secret image bytes][16-byte info
 * block], where the AES-decrypted info block holds the two buffer sizes
 * (2 x int) followed by the "SEAcret" magic tag. The AES key is derived
 * from the user's password via MD5.
 */
void MainWindow::decrypt()
{
    FILE *f = fopen(filename.c_str(), "rb");
    if (!f) {
        QMessageBox::critical(0, "Error", "No image file is loaded!!");
        return;
    }

    isEncryption = false;

    QString text = QInputDialog::getText(this, "Password",
                     "Please enter your password for decryption",
                      QLineEdit::Password, QString());

    // Derive the 128-bit AES key from the password.
    string pwd = string((const char *)text.toLocal8Bit());
    MD5 md5(pwd);
    key = md5.getDigest();

    AES aes(key);

    // Read the 16-byte trailer describing the embedded payload.
    unsigned char info[16] = "";
    fseek(f, -16, SEEK_END);
    if (fread(info, 1, 16, f) != 16) {
        fclose(f);  // file too short / unreadable
        QMessageBox::critical(0, "Error", "Incorrect password or there is no secret in the image");
        return;
    }

    // verify key and info
    aes.InvCipher(info);
    if (memcmp(info+8, "SEAcret", 7)) {
        fclose(f);  // was leaked on this early return before
        QMessageBox::critical(0, "Error", "Incorrect password or there is no secret in the image");
        return;
    }

    int buf_size[2];
    memcpy(buf_size, info, sizeof(int)*2);
    // Sizes come from the (decrypted) file: reject non-positive values so a
    // corrupt trailer cannot crash the vector constructors below.
    if (buf_size[0] <= 0 || buf_size[1] <= 0) {
        fclose(f);
        QMessageBox::critical(0, "Error", "Incorrect password or there is no secret in the image");
        return;
    }
    fseek(f, -16-buf_size[0]-buf_size[1], SEEK_END);
    unsigned char *temp;

    vector<unsigned char> mask_buf(buf_size[0]);
    vector<unsigned char> secret_buf(buf_size[1]);
    size_t got_mask = fread(&mask_buf[0], 1, buf_size[0], f);
    size_t got_secret = fread(&secret_buf[0], 1, buf_size[1], f);
    fclose(f);
    if (got_mask != (size_t)buf_size[0] || got_secret != (size_t)buf_size[1]) {
        QMessageBox::critical(0, "Error", "Incorrect password or there is no secret in the image");
        return;
    }

    // Decrypt the mask buffer block by block (AES operates on 16 bytes).
    for (int i = 0; i < buf_size[0]; i+=16) {
        temp = &mask_buf[0]+i;
        aes.InvCipher(temp);
    }

    // Binarise the decoded mask.
    Mat mask = imdecode((Mat)mask_buf, CV_LOAD_IMAGE_GRAYSCALE);
    mask = mask > 128;

    // Decrypt the secret image buffer.
    for (int i = 0; i < buf_size[1]; i+=16) {
        temp = &secret_buf[0]+i;
        aes.InvCipher(temp);
    }

    Mat secret = imdecode((Mat)secret_buf, 1);

    // Composite the secret back onto the cover image where the mask is set.
    _dst = imread(filename, 1);
    secret.copyTo(_dst, mask);

    imshow("Result", _dst);
    waitKey(0);
    destroyAllWindows();
}
Пример #20
0
/**
 * Colour-transfer entry point (ACCV2009 method): corrects the colours of the
 * second image toward the first using mean-shift segmentation, SIFT matches
 * between regions and per-region HSI deltas.
 *
 * Usage: ACCV2009.exe Folder imfn1 imfn2
 * @return 0 on success, -1 on bad arguments or unreadable input images
 */
int main(int argc, char *argv[])
{
	if (argc != 4)
	{
		cout << "Usage: " << endl;
		cout << "ACCV2009.exe Folder imfn1 imfn2" << endl;
		return -1;
	}

	string folder = argv[1];
	string imfn1  = argv[2];
	string imfn2  = argv[3];
	string outfolder = "../output";
	_mkdir(outfolder.c_str());

	// NOTE(review): assumes `folder` already ends with a path separator —
	// confirm with how the program is invoked.
	Mat im1 = imread(folder + imfn1 + ".png", 1);
	Mat im2 = imread(folder + imfn2 + ".png", 1);

	// imread signals failure by returning an empty Mat; fail fast instead of
	// crashing in the colour-conversion code below.
	if (im1.empty() || im2.empty())
	{
		cout << "Could not load input images." << endl;
		return -1;
	}

	int width = im1.cols;
	int height = im1.rows;

	// Colour space conversion: BGR -> HSI channels for both images.
	Mat im1_H, im1_S, im1_I;
	Mat im2_H, im2_S, im2_I;
	//CvtColorBGR2HSI(im1, im1_H, im1_S, im1_I);
	//CvtColorBGR2HSI(im2, im2_H, im2_S, im2_I);

	cv_CvtColorBGR2HSI(im1, im1_H, im1_S, im1_I);
	cv_CvtColorBGR2HSI(im2, im2_H, im2_S, im2_I);
	
	// Mean-shift segmentation parameters (spatial/range bandwidth, min region).
	int  sigmaS = 5;
	float sigmaR = 10;
	int minR = 800;

	vector<int> labels1(0), labels2(0);

//#define  RQ_DEBUG
#ifdef   RQ_DEBUG
	// Recompute the segmentations and cache them to disk.
	int regionum1, regionum2;
	DoMeanShift(im1, sigmaS, sigmaR, minR, labels1, regionum1);
	DoMeanShift(im2, sigmaS, sigmaR, minR, labels2, regionum2);
	cout << endl;

	saveLabels(labels1, width, height, outfolder+"/labels_" + imfn1 + ".txt");
	saveLabels(labels2, width, height, outfolder+"/labels_" + imfn2 + ".txt");
#else
	// Reuse segmentations cached by a previous RQ_DEBUG run.
	int regionum1, regionum2;
	readLabels(outfolder+"/labels_" + imfn1 + ".txt", width, height, labels1, regionum1);
	readLabels(outfolder+"/labels_" + imfn2 + ".txt", width, height, labels2, regionum2);	
#endif
	
	// Load precomputed SIFT correspondences and group them per region of im2.
	string siftmatchfn = folder + "matches_sift.txt";
	vector<Point2f> features1(0), features2(0);
	ReadSiftMatches(siftmatchfn, features1, features2);

	vector<vector<Point2f>> matches1(0), matches2(0);
	vector<vector<Point2f>> pixelTable2(0);
	ComputeRegionMatches(labels2, regionum2, width, features1, features2, matches1, matches2);
	cout << "all regions have matches." << endl << endl;

	// (optional) visualise per-region matches — only needed once.
	
	// Per-region HSI offsets between matched regions, then apply them to im2.
	vector<float> DeltaH(0), DeltaS(0), DeltaI(0);
	RegionDeltaColor(im1_H, im2_H, matches1, matches2, DeltaH);
	RegionDeltaColor(im1_S, im2_S, matches1, matches2, DeltaS);
	RegionDeltaColor(im1_I, im2_I, matches1, matches2, DeltaI);
	
	Mat new_im2_H, new_im2_S, new_im2_I;
	CorrectColor(im2_H, labels2, DeltaH, new_im2_H);
	CorrectColor(im2_S, labels2, DeltaS, new_im2_S);
	CorrectColor(im2_I, labels2, DeltaI, new_im2_I);

	Mat new_im2;
	//CvtColorHSI2BGR(new_im2_H, new_im2_S, new_im2_I, new_im2);
	cv_CVtColorHSI2BGR(new_im2_H, new_im2_S, new_im2_I, new_im2);

	cout << "done." << endl;

	imshow("new im2", new_im2);
	waitKey(0);
	destroyAllWindows();

	string savefn = outfolder + "/accv2009_" + imfn2 + ".png";
	cout << "save " << savefn << endl;
	imwrite(savefn, new_im2);
	return 0;
}
Пример #21
0
// Tears down the vision subsystem: closes all HighGUI windows, clears the
// singleton instance pointer and releases the FlyCapture camera.
Vision::~Vision()
{
	destroyAllWindows();
	Vision::m_instance = NULL;
	CloseFlyCapCamera();
}
Пример #22
0
int main(int argc, char* argv[])
{
	// Build color lookup table for depth display
	Mat colorLut = Mat(cv::Size(256, 1), CV_8UC3);
	for(int i = 0; i < 256; i++)
		colorLut.at<Vec3b>(i) = (i==0) ? Vec3b(0, 0, 0) : HSV2RGB(i/256.0f, 1, 1);

	// Open Dense3D
	Dense3DMTInstance dense3d;
	if(!Dense3DOpen(&dense3d))
	{
		printf("Could not open Dense3DMT library\n");
		return 1;
	}

	DUOInstance duo = GetDUOInstance(dense3d);

	char tmp[260];
	GetDUODeviceName(duo,tmp);
	printf("DUO Device Name:      '%s'\n", tmp);
	GetDUOSerialNumber(duo, tmp);
	printf("DUO Serial Number:    %s\n", tmp);
	GetDUOFirmwareVersion(duo, tmp);
	printf("DUO Firmware Version: v%s\n", tmp);
	GetDUOFirmwareBuild(duo, tmp);
	printf("DUO Firmware Build:   %s\n", tmp);
	printf("DUOLib Version:       v%s\n", GetDUOLibVersion());
	printf("Dense3DMT Version:    v%s\n", Dense3DGetLibVersion());

	// Set the Dense3D license (visit https://duo3d.com/account)
	if(!SetDense3DLicense(dense3d, "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX")) // <-- Put your Dense3D license
	{
		printf("Invalid or missing Dense3D license. To get your license visit https://duo3d.com/account\n");
		// Close Dense3D library
		Dense3DClose(dense3d);
		return 1;
	}
	// Set the image size
	if(!SetDense3DImageInfo(dense3d, WIDTH, HEIGHT, FPS))
	{
		printf("Invalid image size\n");
		// Close Dense3D library
		Dense3DClose(dense3d);
		return 1;
	}

	// Set Dense3D parameters
	Dense3DParams params;
	params.scale = 0;
    params.mode = 0;
    params.numDisparities = 2;
    params.sadWindowSize = 6;
    params.preFilterCap = 28;
    params.uniqenessRatio = 27;
    params.speckleWindowSize = 52;
    params.speckleRange = 14;
	if(!SetDense3Params(dense3d, params))
    {
        printf("GetDense3Params error\n");
		// Close Dense3D library
		Dense3DClose(dense3d);
        return 1;
    }

	// Queue used to receive Dense3D frames
	Dense3DFrameQueue d3dq;

	// Start DUO capture and Dense3D processing
	if(!Dense3DStart(dense3d, [](const PDense3DFrame pFrameData, void *pUserData)
					{
                        D3DFrame frame;
                        Size frameSize(pFrameData->duoFrame->width, pFrameData->duoFrame->height);
                        frame.leftImg = Mat(frameSize, CV_8U, pFrameData->duoFrame->leftData);
                        frame.rightImg = Mat(frameSize, CV_8U, pFrameData->duoFrame->rightData);
                        frame.disparity = Mat(frameSize, CV_32F, pFrameData->disparityData);
                        frame.depth = Mat(frameSize, CV_32FC3, pFrameData->depthData);
                        ((Dense3DFrameQueue*)pUserData)->push(frame);
					}, &d3dq))
	{
		printf("Dense3DStart error\n");
		// Close Dense3D library
		Dense3DClose(dense3d);
		return 1;
	}

	// Set exposure, LED brightness and camera orientation
	SetDUOExposure(duo, 85);
	SetDUOLedPWM(duo, 28);
	SetDUOVFlip(duo, true);

	// Run capture loop until <Esc> key is pressed
	while((cvWaitKey(1) & 0xff) != 27)
	{
		D3DFrame d3DFrame;
		if(!d3dq.pop(d3DFrame))
			continue;

		Mat disp8;
		d3DFrame.disparity.convertTo(disp8, CV_8UC1, 255.0/(params.numDisparities*16));
		Mat rgbBDisparity;
		cvtColor(disp8, rgbBDisparity, COLOR_GRAY2BGR);
		LUT(rgbBDisparity, colorLut, rgbBDisparity);

		// Display images
		imshow("Left Image", d3DFrame.leftImg);
		imshow("Right Image", d3DFrame.rightImg);
		imshow("Disparity Image", rgbBDisparity);
	}
	destroyAllWindows();

	Dense3DStop(dense3d);
	Dense3DClose(dense3d);
	return 0;
}
// Destructor: closes any OpenCV windows the stitcher opened.
AerialCameraStitch::~AerialCameraStitch(){
	destroyAllWindows();
}
Пример #24
0
// Destructor: closes the OpenCV GUI windows opened by this instance.
Chroma_processing::~Chroma_processing()
{
	//destroy GUI windows
	destroyAllWindows();
	
}
Пример #25
0
/**
 * Runs one generated CAPTCHA through the preprocessing pipeline, predicts
 * each of the nic characters with the classifier, visualises all
 * intermediate stages, and returns whatever CCaptcha::check reports for the
 * predicted string.
 *
 * @param cl       trained classifier used to predict each character cell
 * @param captcha  CAPTCHA generator/oracle; operator() yields the image
 * @return result of captcha->check() on the predicted characters
 */
unsigned test(CClassifier * cl, CCaptcha * captcha)
{
	char captcha_predict[nic];
	unsigned u, v, max_area_ind;
	double area, max_area;
	Mat img = (* captcha)();
	Size size = img.size();
	// Sharpening kernel: strong centre, slight negative ring.
	Mat kernel = (Mat_<double>(3, 3) <<
		-0.1, -0.1, -0.1,
		-0.1,  2.0, -0.1,
		-0.1, -0.1, -0.1);
	Mat median, filter, lpl, sum, temp, thr;
	vector<Mat> ch;
	vector<vector<Point> > cnt;

	// Denoise, sharpen, take edges, then binarise them.
	medianBlur(img, median, 5);
	filter2D(median, filter, -1, kernel);
	Laplacian(filter, lpl, CV_32F, 5);
	threshold(lpl, thr, 150, 255, THRESH_BINARY);

	// Collapse the three colour channels into a single 8-bit edge map.
	split(thr, ch);
	add(ch[0], ch[1], temp, noArray());
	add(temp, ch[2], sum, noArray(), CV_8U);

	for(u = 0; u < nic; u++)
	{
		try
		{
			// Cut out the u-th character cell.
			Mat nimg = sum(Range(0, size.height), Range(u * size.width / nic, (u + 1) * size.width / nic)).clone();
			Mat tnimg = nimg.clone();
			temp = nimg.clone();
			Mat vc = vec(nimg);

			captcha_predict[u] = captcha->alphabet(cl->predict(vc));

			printf("%c\n", captcha_predict[u]);

			Mat cnt_img(size.height, size.width / nic, CV_8UC1);
			findContours(temp, cnt, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_KCOS);

			// Black background for the contour preview.
			rectangle(cnt_img, Point(0, 0), Point(size.width, size.height), Scalar::all(0), CV_FILLED);

			if(!cnt.empty())
			{
				// Pick the largest contour. max_area_ind was previously read
				// uninitialized whenever findContours found nothing.
				max_area = 0;
				max_area_ind = 0;
				for(v = 0; v < cnt.size(); v++)
				{
					area = contourArea(cnt[v]);

					if(area > max_area)
					{
						max_area = area;
						max_area_ind = v;
					}
				}

				vector<vector<Point> > approx_cnt;
				approx_cnt.push_back(vector<Point>());
				approxPolyDP(cnt[max_area_ind], approx_cnt[0], 2, true);
				drawContours(cnt_img, approx_cnt, -1, Scalar::all(255));
			}

			namedWindow("img", CV_WINDOW_NORMAL);
			namedWindow("nimg", CV_WINDOW_NORMAL);
			namedWindow("median", CV_WINDOW_NORMAL);
			namedWindow("filter", CV_WINDOW_NORMAL);
			namedWindow("laplacian", CV_WINDOW_NORMAL);
			namedWindow("thres", CV_WINDOW_NORMAL);
			namedWindow("sum", CV_WINDOW_NORMAL);
			namedWindow("cnt_img", CV_WINDOW_NORMAL);

			imshow("img", img);
			imshow("nimg", tnimg);
			imshow("median", median);
			imshow("filter", filter);
			imshow("laplacian", lpl);
			imshow("thres", thr);
			imshow("sum", sum);
			imshow("cnt_img", cnt_img);

			waitKey();

			destroyAllWindows();
		}
		catch(...)
		{
			// Deliberate best-effort: a failure on one character cell must
			// not abort the whole test run.
			;
		}
	}

	return captcha->check(captcha_predict);
}
Пример #26
0
/// Records frames from the depth camera into per-experiment buffers and,
/// when the session is finished, writes every recorded experiment to disk
/// under <cwd>\data\<experiment-dir>\.
///
/// Press 's' (with a preview window focused) to start recording; recording
/// stops automatically after nFramesPerExperiment frames. When createDirs()
/// returns the sentinel name "e" the whole session is flushed to disk and
/// the process exits.
///
/// In: verticesFloatingPoint / depthMapFloatingPoint / colorMap / confMap /
///     uvMap / RGBuvmap: the current frame's maps from the DepthSense
///     pipeline. Stored maps are deep-copied (clone) so a later frame cannot
///     overwrite data already recorded.
/// NOTE(review): depthMapFloatingPoint is previewed but never recorded —
/// confirm this is intentional.
void saveExperiment(Mat &verticesFloatingPoint, Mat &depthMapFloatingPoint,Mat &colorMap, Mat &confMap, Mat &uvMap, Mat &RGBuvmap)
{
	// Base output directory: <current working directory>\data\
	char cCurrentPath[FILENAME_MAX];
	GetCurrentDir(cCurrentPath, sizeof(cCurrentPath));
	string working_path(cCurrentPath);
	working_path += "\\data\\";  // fixed: original had a stray unary '+' ("+=+")
	
	// Keep the singleton-initialization side effect; the returned pointer was
	// never used. NOTE(review): confirm getInstance() has no other role here.
	HandSegm::DepthNode::getInstance();
	
	if(new_experiment)
	{
		name_dir.push_back(createDirs());
		new_experiment = false;
	}

	// Live previews; these windows also receive the keyboard input below.
	imshow("rgbuvmap",RGBuvmap);
	imshow("dep->getDepthMapFloatingPoint()",depthMapFloatingPoint);
	imshow("dep->getConfMap()",confMap);
	
	if(waitKey(1) == 's' || s_already_pressed == true)
	{
		if(!s_already_pressed)
		{
			cout << "Recording..." << endl;
		}
		s_already_pressed = true;

		// clone() so each stored frame owns its pixels. Pushing the Mat
		// header directly only copies a reference-counted header; if the
		// capture pipeline reuses its buffers, every stored frame would end
		// up aliasing the most recent data.
		vec_verticesFloatingPointArray.push_back(verticesFloatingPoint.clone());
		vec_colorMap.push_back(colorMap.clone());
		vec_confMap.push_back(confMap.clone());
		vec_uvMap.push_back(uvMap.clone());
		vec_RGBuvmap.push_back(RGBuvmap.clone());

		frameCounter++;
	}

	// One experiment is complete: reset per-experiment state.
	if(frameCounter == nFramesPerExperiment)
	{
		cout << "Recording done." << endl;
		frameCounter = 0;
		s_already_pressed = false;
		experimentCounter++;
		new_experiment = true;
		destroyAllWindows();
	}

	// Sentinel directory name "e" ends the session: flush everything to disk.
	if(name_dir.back() == "e")
	{
		destroyAllWindows();

		cout << "Recording complete." << endl;
		cout << "Saving " << experimentCounter << " experiments to disk..." << endl;
		
		for(int i=0;i<experimentCounter;i++)
		{
			cout<<"Saving experiment `" << name_dir[i] << "`" << endl;
			
			for(int j=0;j<nFramesPerExperiment;j++)
			{
				// Buffers are stored flat: experiment i, frame j.
				// Path made consistent with the other maps (working_path
				// already ends with a backslash; the original inserted an
				// extra "\\" only for the vertices map).
				saveToFile<float>(vec_verticesFloatingPointArray[i*nFramesPerExperiment+j], working_path+name_dir[i]+verticesFloatingPoint_str+"\\", j);
				saveToFile<int16_t>(vec_colorMap[i*nFramesPerExperiment+j], working_path+name_dir[i]+colorMap_str+"\\", j); 
				saveToFile<int16_t>(vec_confMap[i*nFramesPerExperiment+j], working_path+name_dir[i]+confidenceMap_str+"\\", j); 
				saveToFile<float>(vec_uvMap[i*nFramesPerExperiment+j], working_path+name_dir[i]+uvMap_str+"\\", j); 
				std::ostringstream oss;
				oss << j << ".png";
				imwrite( working_path+name_dir[i]+RGBuvMap_str+"\\"+oss.str(),vec_RGBuvmap[i*nFramesPerExperiment+j]);
				cout<< std::setprecision(3) << ((float)j/(float)nFramesPerExperiment)*100<<"%"<<endl;
			}
		}
		cout << "Saving complete." << endl;
		Sleep(1000);
		exit( 0 );
	}
}
Пример #27
0
void main()
{
    const string SRC_WIN_NAME = "ԭͼ";
    const string MED_WIN_NAME = "Ä£ºýͼ";
    const string DST_WIN_NAME = "¸´Ô­Í¼";
    const string SRC_NAME = "../../ImageRestore/images/flower.jpg";/*be_grdient.jpg*/


    Mat srcImage = imread(SRC_NAME, CV_LOAD_IMAGE_GRAYSCALE);
    Mat medImage, dstImage1, dstImage2, dstImage3, psf;
    imageInit(srcImage, srcImage);
    imshow("ԭͼ",srcImage);
    cout << "depth=" << srcImage.depth() << endl;
    fftShow(srcImage);
    //Mat A = (Mat_<float>(3, 3) << 3, 2, 1, 4.5, 5, 2, 4, 2, 3);
    //float d=A.at<float>(1,1);
    //float e = A.at<float>(1,2);
    //calWeight(srcImage, medImage);
    //cout << medImage<< endl;
    //normalize(medImage, medImage, 1, 0, NORM_MINMAX);
    //medImage=Mat_<uchar>(medImage);
    //genaratePsf(psf,15, 40);
//

//	filter2D(srcImage, medImage, -1, psf);
    //cepstral(medImage,dstImage1);
    //double  angle = directionDiffAngle(medImage);
    //cout << " angle=" << angle << endl;
    //imshow("µ¹Æ×", dstImage1);
    //wnrFilter2(medImage, psf, dstImage1);

    //inverseFilter(medImage, psf, dstImage1);
//	deconvRL3(medImage, psf, dstImage1, 20);




    //double t1 = (double)getTickCount();
    //deconvRL4(medImage, psf, dstImage3, 20);
    //t1 = ((double)getTickCount() - t1) / getTickFrequency();

    //double t2 = (double)getTickCount();
    //deconvRL5(medImage, psf, dstImage2,20);
    //t2 = ((double)getTickCount() - t2) / getTickFrequency();
    ////imwrite("lenal25a40.bmp", medImage);
    ////imwrite("lenal15a40_wnr2.bmp", dstImage2);
    //deconvRL3(medImage, psf, dstImage3, 20);
    ////double PSNR1 = calPSNR(dstImage2, srcImage);
    //double GMG1 = calGMG(srcImage);
    //double GMG2 = calGMG(medImage);
    //double GMG3 = calGMG(dstImage1);
    //double GMG4 = calGMG(dstImage2);
    //double con1 = calContrast(srcImage);
    //double con2 = calContrast(medImage);
    //double con3 = calContrast(dstImage1);
    //double con4 = calContrast(dstImage2);
    //double nsr1 = estimatek(srcImage);
    //double nsr2 = estimatek(medImage);
    //double nsr3 = estimatek(dstImage1);
    //double nsr4 = estimatek(dstImage2);


    ////double PSNR3 = calPSNR(dstImage3, srcImage);

    ////



    //
    ////cout << "t=" <<t2 << endl;
    ////
    //cout << "GMG1=" << GMG1 << endl;
    //cout << "GMG2=" << GMG2 << endl;
    //cout << "GMG3=" << GMG3 << endl;
    //cout << "GMG4=" << GMG4 << endl;
    //cout << "con1=" << con1 << endl;
    //cout << "con2=" << con2 << endl;
    //cout << "con3=" << con3 << endl;
    //cout << "con4=" << con4 << endl;
    //cout << "nsr1=" << nsr1 << endl;
    //cout << "nsr2=" << nsr2 << endl;
    //cout << "nsr3=" << nsr3 << endl;
    //cout << "nsr4=" << nsr4 << endl;

    ////imshow("Ä£ºýºË", psf);
    //
    ////namedWindow("Ä£ºýºË1", 0);

    //imshow(SRC_WIN_NAME, srcImage);
    //imshow(MED_WIN_NAME, medImage);
    //imshow("wnr+edgetaper", dstImage1);
    //imshow("RL5+edgetaper", dstImage2);
    //imshow("RL3+edgetaper", dstImage3);
//	imshow("extend", dstImage3);


    waitKey(0);
    destroyAllWindows();
}
Пример #28
0
/// Calibrates the extrinsic parameters of the two-camera setup and saves the
/// result to an XML file ("EnvironmentCalibration.xml").
/// Press 'r' to retrieve chessboard corners and run the stereo calibration
///       's' to save and exit (only saves once a calibration has succeeded)
///       'c' to exit without saving
/// In: inputCapture1: video feed of camera 1
///     inputCapture2: video feed of camera 2
/// Requires per-camera intrinsics in "CameraIntrinsics1.xml" and
/// "CameraIntrinsics2.xml" (nodes "Camera_Matrix" / "Distortion_Coefficients")
/// produced by a prior single-camera calibration.
void CalibrateEnvironment(VideoCapture& inputCapture1, VideoCapture& inputCapture2)
{
    Size boardSize;
    boardSize.width = BOARD_WIDTH;
    boardSize.height = BOARD_HEIGHT;
    
    const string fileName1 = "CameraIntrinsics1.xml";
    const string fileName2 = "CameraIntrinsics2.xml";
    
    cerr << "Attempting to open configuration files" << endl;
    FileStorage fs1(fileName1, FileStorage::READ);
    FileStorage fs2(fileName2, FileStorage::READ);
    
    Mat cameraMatrix1, cameraMatrix2;
    Mat distCoeffs1, distCoeffs2;
    
    fs1["Camera_Matrix"] >> cameraMatrix1;
    fs1["Distortion_Coefficients"] >> distCoeffs1;
    fs2["Camera_Matrix"] >> cameraMatrix2;
    fs2["Distortion_Coefficients"] >> distCoeffs2;
    
    // NOTE(review): on a failed load the function keeps running with empty
    // matrices; stereoCalibrate below would then misbehave. Consider an early
    // return here -- confirm with callers before changing.
    if (cameraMatrix1.data == NULL || distCoeffs1.data == NULL ||
        cameraMatrix2.data == NULL || distCoeffs2.data == NULL)
    {
        cerr << "Could not load camera intrinsics\n" << endl;
    }
    else{
        cerr << "Loaded intrinsics\n" << endl;
        cerr << "Camera Matrix1: " << cameraMatrix1 << endl;
        cerr << "Camera Matrix2: " << cameraMatrix2 << endl;
        
    }
    
    Mat translation;
    Mat image1, image2;
    Mat mapX1, mapX2, mapY1, mapY2;
    // Grab one frame to learn the image size used throughout calibration.
    inputCapture1.read(image1);
    Size imageSize = image1.size();
    bool rotationCalibrated = false;
    
    while(inputCapture1.isOpened() && inputCapture2.isOpened())
    {
        inputCapture1.read(image1);
        inputCapture2.read(image2);
        
        // Once a calibration exists, preview the rectified images so the
        // operator can judge the result before saving.
        if (rotationCalibrated)
        {
            Mat t1 = image1.clone();
            Mat t2 = image2.clone();
            remap(t1, image1, mapX1, mapY1, INTER_LINEAR);
            remap(t2, image2, mapX2, mapY2, INTER_LINEAR);
            t1.release();
            t2.release();
        }
        
        char c = waitKey(15);
        if (c == 'c')
        {
            cerr << "Cancelling..." << endl;
            return;
        }
        else if(c == 's' && rotationCalibrated)
        {
            // Persist the new camera matrices, the rectification maps and the
            // camera-to-camera translation.
            cerr << "Saving..." << endl;
            const string fileName = "EnvironmentCalibration.xml";
            FileStorage fs(fileName, FileStorage::WRITE);
            fs << "Camera_Matrix_1" <<  getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1,imageSize, 0);
            fs << "Camera_Matrix_2" <<  getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0);
            fs << "Mapping_X_1" << mapX1;
            fs << "Mapping_Y_1" << mapY1;
            fs << "Mapping_X_2" << mapX2;
            fs << "Mapping_Y_2" << mapY2;
            fs << "Translation" << translation;
            cerr << "Exiting..." << endl;
            destroyAllWindows();
            return;
        }
        else if(c == 's' && !rotationCalibrated)
        {
            // 's' without a prior calibration: nothing to save, just leave.
            cerr << "Exiting..." << endl;
            destroyAllWindows();
            return;
        }
        else if (c == 'r')
        {
            BoardSettings s;
            s.boardSize.width = BOARD_WIDTH;
            s.boardSize.height = BOARD_HEIGHT;
            s.cornerNum = s.boardSize.width * s.boardSize.height;
            s.squareSize = (float)SQUARE_SIZE;
            
            vector<Point3f> objectPoints;
            vector<vector<Point2f> > imagePoints1, imagePoints2;
            
            if (RetrieveChessboardCorners(imagePoints1, imagePoints2, s, inputCapture1, inputCapture2, ITERATIONS))
            {
                // NOTE: this inner objectPoints shadows the (unused) outer
                // vector<Point3f> objectPoints declared above.
                vector<vector<Point3f> > objectPoints(1);
                CalcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0]);
                objectPoints.resize(imagePoints1.size(),objectPoints[0]);
                
                Mat R, T, E, F;
                Mat rmat1, rmat2, rvec;
                
                // First pass: recover the raw rotation/translation between the
                // cameras with both sets of intrinsics held fixed.
                double rms = stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E, F,
                                             TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01),
                                             CV_CALIB_FIX_INTRINSIC);
                
                cerr << "Original translation: " << T << endl;
                cerr << "Reprojection error reported by camera: " << rms << endl;
                
                // convert to rotation vector and then remove 90 degree offset
                // (1.570796327 is pi/2 in radians)
                Rodrigues(R, rvec);
                rvec.at<double>(1,0) -= 1.570796327;
                
                // equal rotation applied to each image...not necessarily needed
                rvec = rvec/2;
                Rodrigues(rvec, rmat1);
                invert(rmat1,rmat2);
                
                // Build the rectification maps used both for the live preview
                // above and for the saved XML file.
                initUndistortRectifyMap(cameraMatrix1, distCoeffs1, rmat1,
                                        getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1,imageSize, 0), imageSize, CV_32FC1, mapX1, mapY1);
                initUndistortRectifyMap(cameraMatrix2, distCoeffs2, rmat2,
                                        getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0), imageSize, CV_32FC1, mapX2, mapY2);
                
                
                // reproject points in camera 1 since its rotation has been changed
                // need to find the translation between cameras based on the new camera 1 orientation
                for  (int i = 0; i < imagePoints1.size(); i++)
                {
                    Mat pointsMat1 = Mat(imagePoints1[i]);
                    Mat pointsMat2 = Mat(imagePoints2[i]);
                    
                    
                    undistortPoints(pointsMat1, imagePoints1[i], cameraMatrix1, distCoeffs1, rmat1,getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1, imageSize, 0));
                    undistortPoints(pointsMat2, imagePoints2[i], cameraMatrix2, distCoeffs2, rmat2,getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0));
                    
                    pointsMat1.release();
                    pointsMat2.release();
                }
                
                Mat temp1, temp2;
                R.release();
                T.release();
                E.release();
                F.release();
                
                // TODO: remove this
                // CalcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0]);
                // objectPoints.resize(imagePoints1.size(),objectPoints[0]);
                
                // Second pass on the undistorted/rectified points to obtain
                // the final translation between the cameras.
                stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E, F,
                                TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01),
                                CV_CALIB_FIX_INTRINSIC);
                
                // need to alter translation matrix so
                // [0] = distance in X direction (right from perspective of camera 1 is positive)
                // [1] = distance in Y direction (away from camera 1 is positive)
                // [2] = distance in Z direction (up is positive)
                translation = T;
                double temp = -translation.at<double>(0,0);
                translation.at<double>(0,0) = translation.at<double>(2,0);
                translation.at<double>(2,0) = temp;
                
                cerr << "Translation reproj: " << translation << endl;
                Rodrigues(R, rvec);
                cerr << "Reprojected rvec: " << rvec << endl;
                
                imagePoints1.clear();
                imagePoints2.clear();
                
                rvec.release();
                rmat1.release();
                rmat2.release();
                R.release();
                T.release();
                E.release();
                F.release();
                
                rotationCalibrated = true;
            }
        }
        imshow("Image View1", image1);
        imshow("Image View2", image2);
    }
}
Пример #29
0
/// Streams color and depth frames from the first available Kinect sensor and
/// shows them in the "color" and "depth" windows until 'q' is pressed in
/// either window (or a frame retrieval fails).
void KinectDataMan::ShowColorDepth()
{
	// Already connected to a sensor: nothing to do.
	if( m_cvhelper.IsInitialized() )
		return;

	m_cvhelper.SetColorFrameResolution(color_reso);
	m_cvhelper.SetDepthFrameResolution(depth_reso);

	HRESULT hr;
	// Get number of Kinect sensors
	int sensorCount = 0;
	hr = NuiGetSensorCount(&sensorCount);
	if (FAILED(hr)) 
	{
		return;
	}

	// Fixed: bail out when no sensor is attached. The original only printed
	// a message and fell through to use the uninitialized helper below.
	if (sensorCount == 0)
	{
		cerr<<"No kinect"<<endl;
		return;
	}

	// Iterate through Kinect sensors until one is successfully initialized
	for (int i = 0; i < sensorCount; ++i) 
	{
		INuiSensor* sensor = NULL;
		hr = NuiCreateSensorByIndex(i, &sensor);
		if (SUCCEEDED(hr))
		{
			hr = m_cvhelper.Initialize(sensor);
			if (SUCCEEDED(hr)) 
			{
				// Report success
				cerr<<"Kinect initialized"<<endl;
				break;
			}
			else
			{
				// Uninitialize KinectHelper to show that Kinect is not ready.
				// NOTE(review): the INuiSensor reference obtained from
				// NuiCreateSensorByIndex is never Release()d on this path --
				// confirm whether UnInitialize() drops it; otherwise this
				// leaks a COM reference.
				m_cvhelper.UnInitialize();
				return;
			}
		}
	}

	// Fixed: if every NuiCreateSensorByIndex call failed, the loop falls
	// through without a working sensor; do not touch the uninitialized helper.
	if( !m_cvhelper.IsInitialized() )
		return;

	// Allocate display buffers matching the helper's negotiated frame sizes.
	DWORD width, height;
	m_cvhelper.GetColorFrameSize(&width, &height);
	Size colorsz(width, height);
	Mat cimg(colorsz, m_cvhelper.COLOR_TYPE);
	m_cvhelper.GetDepthFrameSize(&width, &height);
	Size depthsz(width, height);
	Mat dimg(depthsz, m_cvhelper.DEPTH_RGB_TYPE);

	// start processing: poll color then depth, quit on 'q' or any failure
	while(true)
	{
		// get color frame
		if( SUCCEEDED(m_cvhelper.UpdateColorFrame()) )
		{
			HRESULT hr = m_cvhelper.GetColorImage(&cimg);
			if(FAILED(hr))
				break;

			imshow("color", cimg);
			if( waitKey(10) == 'q' )
				break;
		}

		if( SUCCEEDED(m_cvhelper.UpdateDepthFrame()) )
		{
			HRESULT hr = m_cvhelper.GetDepthImageAsArgb(&dimg);
			if(FAILED(hr))
				break;

			imshow("depth", dimg);
			if( waitKey(10) == 'q' )
				break;
		}
	}

	destroyAllWindows();

}
Пример #30
0
// Tears down GUI resources owned by this processor: closes every HighGUI
// window that was opened during processing.
Depth_processing::~Depth_processing()
{
	destroyAllWindows();
}