Example #1
bool CameraVideo::loadNextDataSet(string &location){

    if(mVideoID != 0){

        cout << "Change video : " << mVideoID << " - Path : " << mVideoList.at(mVideoID) << endl;

        mCap = VideoCapture(mVideoList.at(mVideoID));

        if(!mCap.isOpened()){

             cout << "Cannot open the video file" << endl;
             return false;

        }else{

            cout << "Success to open the video file" << endl;

        }

        mFrameHeight = mCap.get(CV_CAP_PROP_FRAME_HEIGHT);

        mFrameWidth = mCap.get(CV_CAP_PROP_FRAME_WIDTH);

        mReadDataStatus = false;

    }

    return true;

}
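For context, a minimal sketch of the calling side: iterate a playlist the way loadNextDataSet() does, opening each file and reading it to exhaustion. Everything below is illustrative; the file names are placeholders and nothing beyond the pattern above is assumed.

#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
#include <vector>

int main() {
    // Placeholder playlist, standing in for mVideoList.
    std::vector<std::string> videos = {"first.avi", "second.avi"};
    for (size_t id = 0; id < videos.size(); ++id) {
        cv::VideoCapture cap(videos[id]);
        if (!cap.isOpened()) {
            std::cout << "Cannot open the video file" << std::endl;
            continue;
        }
        cv::Mat frame;
        while (cap.read(frame)) {
            // process the frame here
        }
    }
    return 0;
}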
Example #2
void Calibration::init(){
	if(device){
		cam = VideoCapture(idCam);
		// discard a few initial frames so auto-exposure and white balance can settle
		for(int i = 0 ; i < 30 ; i++){
			cam >> inputImage;
		}
	}

	rotation = 0;
	// Helper bounds default to min = 0 and max = 255; the color range starts empty.
	for(int i = 0 ; i < 3 ; i++){
		staticVisionColorHelper[i] = 0;
		staticVisionColorHelper[i+3] = 255;
	}
	staticVisionColor.min = Pixel(0, 0, 0);
	staticVisionColor.max = Pixel(0, 0, 0);

	// If the calibration stage is a color
	if(calibrationStage >= 0 && calibrationStage <= 7){
		calibrationVisionColor();
	}else{
		if(calibrationStage == ROTATION){
			calibrationRotation();
		}else
		if(calibrationStage == CUT){
			calibrationCut();
		}
	}
}
Example #3
void kuwaharaFilter::videoFilter(const string &src_path, const string &dst_path) {
    
    VideoCapture v_reader = VideoCapture(src_path.c_str());
    
    double resize_ratio = 1.0;
    
    if ( v_reader.isOpened() ) {
        
        Mat temp;
        v_reader >> temp;
        
        resizeMat(temp, resize_ratio);
        
        VideoWriter v_writer = VideoWriter(dst_path.c_str(), CV_FOURCC('P','I','M','1'), 30, temp.size());
        
        while (!temp.empty()) {

            temp = imageFilter(temp, 3);
            
            temp.convertTo(temp, CV_8UC3);
            
            v_writer.write(temp);

            v_reader >> temp; // fetch the next frame
            if (!temp.empty())
                resizeMat(temp, resize_ratio);

        }
        
        v_writer.release();
    
    }

    v_reader.release();
}
Example #4
Camera::Camera(){
	cap = VideoCapture(0);
	cap.set(CV_CAP_PROP_FRAME_WIDTH, MAXXCOOR);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, MAXYCOOR);
	if (!cap.isOpened()) {
		std::cout << "Câmera desconectada" << std::endl;
	}
	FILE *f;
	// If you get a segfault, copy the following files into the execution folder:
	if((f = fopen("iLowH.txt", "r"))) {
		fscanf(f, "%d", &iLowH);
		fclose(f);
	} else iLowH = 0;

	if((f = fopen("iHighH.txt", "r"))) {
		fscanf(f, "%d", &iHighH);
		fclose(f);
	} else iHighH = 27;
	if((f = fopen("iLowS.txt", "r"))) {
		fscanf(f, "%d", &iLowS);
		fclose(f);
	} else iLowS = 121;
	if((f = fopen("iHighS.txt", "r"))) {
		fscanf(f, "%d", &iHighS);
		fclose(f);
	} else iHighS = 233;
	if((f = fopen("iLowV.txt", "r"))) {
		fscanf(f, "%d", &iLowV);
		fclose(f);
	} else iLowV = 180;
	if((f = fopen("iHighV.txt", "r"))) {
		fscanf(f, "%d", &iHighV);
		fclose(f);
	} else iHighV = 255;
	if((f = fopen("ErodeDilate.txt", "r"))) {
		fscanf(f, "%d", &ErodeDilate);
		fclose(f);
	} else ErodeDilate = 1;

#ifdef DEBUG
	namedWindow("Drawing", CV_WINDOW_AUTOSIZE);
	namedWindow("Control", CV_WINDOW_AUTOSIZE);
	namedWindow("Tresh", CV_WINDOW_AUTOSIZE);
	namedWindow("Biggest",CV_WINDOW_AUTOSIZE);

	cvCreateTrackbar("LowH", "Control", &iLowH, 179, saveiLowH); //Hue (0 - 179)
	cvCreateTrackbar("HighH", "Control", &iHighH, 179, saveiHighH);

	cvCreateTrackbar("LowS", "Control", &iLowS, 255, saveiLowS); //Saturation (0 - 255)
	cvCreateTrackbar("HighS", "Control", &iHighS, 255, saveiHighS);

	cvCreateTrackbar("LowV", "Control", &iLowV, 255, saveiLowV); //Value (0 - 255)
	cvCreateTrackbar("HighV", "Control", &iHighV, 255, saveiHighV);

	cvCreateTrackbar("Erode & Dilate", "Control", &ErodeDilate, 5, saveErodeDilate);
#endif
}
//------------------------------------------------------------------------------
Example #5
opencvWebcam::opencvWebcam(): cameraId(0)
{
   cv::FileStorage fileStorage(PKGDATADIR "/config.xml", cv::FileStorage::READ);
   if (fileStorage.isOpened() ) {
      cameraId = (int)fileStorage["CAMERA_INDEX"];
   }

   VideoCapture(cameraId);
}
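Note that the final statement above, VideoCapture(cameraId);, constructs a temporary that is destroyed immediately, so no capture remains open after the constructor returns. Presumably the intent was to store it in a member; a sketch of that fix, where the member name webcam is hypothetical and does not appear in the snippet:

// Hypothetical fix: keep the capture in a member instead of a temporary.
webcam = cv::VideoCapture(cameraId);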
Example #6
//Detector::Detector(Mat &frame, int camnum, float exposure, int f_height, int f_width)
Detector::Detector(int camnum, float exposure, int f_height, int f_width)
{
    this->f_height = f_height;
	this->f_width = f_width;
	this->exposure = exposure;
    this->frame = Mat::zeros(f_height, f_width, CV_8SC3);
    
    mask = Mat::zeros(f_height, f_width, CV_8UC1);
    
    double color_dist_th = 15;
    int blocksize = f_width / VISION_NUM_RAYS;
    int offset = 1 * blocksize;
    m_rangeFinder = new RangeFinder(f_height, f_width, blocksize, color_dist_th, offset);

    
    /*
     
    // -------------- Background segmentation using k-means --------------
    k_blur = Size(3,3);
    
    // morphology
    cross3 = getStructuringElement(MORPH_CROSS, Size(3,3));
    
    normalize = false;
    // initialize background patterns
    if (normalize) {
        double dist_th = 0.03;
        bg_tile = new BGPattern(n_color_tiles[0], n_color_tiles[1], n_color_tiles[2], dist_th, f_height, f_width);
        bg_grass = new BGPattern(n_color_grass[0], n_color_grass[1], n_color_grass[2], dist_th, f_height, f_width);
        bg_wood = new BGPattern(n_color_wood[0], n_color_wood[1], n_color_wood[2], dist_th, f_height, f_width);
        bg_obstacle = new BGPattern(n_color_obstacle[0], n_color_obstacle[1], n_color_obstacle[2], dist_th, f_height, f_width);
    } else {
        double dist_th = 30;
        bg_tile = new BGPattern(color_tiles[0], color_tiles[1], color_tiles[2], dist_th, f_height, f_width);
        bg_grass = new BGPattern(color_grass[0], color_grass[1], color_grass[2], dist_th, f_height, f_width);
        bg_wood = new BGPattern(color_wood[0], color_wood[1], color_wood[2], dist_th, f_height, f_width);
        bg_obstacle = new BGPattern(color_obstacle[0], color_obstacle[1], color_obstacle[2], dist_th, f_height, f_width);
    }
    
//    cout << *bg_tile << endl;
//    patterns.push_back(bg_tile);
//    patterns.push_back(bg_grass);
//    patterns.push_back(bg_wood);
//    patterns.push_back(bg_obstacle);
     
     */
    
    cap = VideoCapture(camnum); // open the default camera
    if (!cap.isOpened())  // check if we succeeded
    {
        cout << "no camera" << endl;
    }
    else {
        setSettings();
        //printSettings();
    }
}
Example #7
RetrievingFrame::RetrievingFrame(QObject *parent) :
    QThread(parent)
{
    camera = VideoCapture(0);
    camera.set(CV_CAP_PROP_FRAME_WIDTH,320);
    camera.set(CV_CAP_PROP_FRAME_HEIGHT,240);
    camera.set(CV_CAP_PROP_SATURATION,0.7);
    STOP = false;
}
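A caveat that applies to constructors like the two above: VideoCapture::set() is only a request, and a backend may clamp or ignore it, so it is worth reading the effective values back with get(). A small self-contained sketch:

#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
    cv::VideoCapture cam(0);
    cam.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cam.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
    // The driver may have substituted its nearest supported resolution.
    std::cout << "actual: "
              << cam.get(CV_CAP_PROP_FRAME_WIDTH) << "x"
              << cam.get(CV_CAP_PROP_FRAME_HEIGHT) << std::endl;
    return 0;
}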
Example #8
VideoSource::VideoSource(int width, int height){
	videoSource_ = VideoCapture(0); 
	isInit_ = initVideoSource();
	if (isInit_){
		videoSource_.set(CV_CAP_PROP_FRAME_WIDTH,width);
		videoSource_.set(CV_CAP_PROP_FRAME_HEIGHT,height);
	//	videoSource_.set(CV_CAP_PROP_FPS,30);
	}
}
Example #9
// Constructor
SavingThread::SavingThread() : QThread()
{
    this->doStop = true;

    currentWriteIndex = 0;
    processingBufferLength = 1;

    cap = VideoCapture();
    out = VideoWriter();
}
Example #10
int main()
{
    Mat img1, img2, gray1, gray2;
    VideoCapture cap1 = VideoCapture(0);
    VideoCapture cap2 = VideoCapture(2);
    int success = 0, k = 0;
    bool found1 = false, found2 = false;
    
    while(1)
    {
        cap1 >> img1;
        cap2 >> img2;
        
        imshow("camera1", img1);
        imshow("camera2", img2);

        // waitKey() pumps the HighGUI event loop; without it the windows never update
        if (waitKey(30) >= 0)
            break;
    }
    
    return 0;
}
Example #11
VideoCapture v4l2cap(int device, int width, int height, int fps) {
    VideoCapture cap = VideoCapture(device);
    if (!cap.isOpened()) {
        fprintf(stderr, "Error opening video capture %d\n", device);
        exit(1);
    }


    cap.set(CV_CAP_PROP_FRAME_WIDTH, width);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, height);

    // Obtain the webcam's file descriptor
    int fd = -1;
    DIR* dir;
    struct dirent *ent;
    // We can assume this directory exists...
    dir = opendir("/proc/self/fd");
    while ((ent=readdir(dir)) != NULL) {
        // Skip . and ..
        if(ent->d_name[0] == '.') continue;
        char fl[100] = "/proc/self/fd/";
        char dst[100];
        strcat(fl, ent->d_name);
        ssize_t len = readlink(fl, dst, sizeof(dst) - 1);
        if(len == -1) {
            fprintf(stderr, "Error listing file descriptor links\n");
            exit(2);
        }
        dst[len] = '\0'; // readlink() does not null-terminate
        if(strstr(dst, "/dev/video")) {
            fd = atoi(ent->d_name);
        }
    }
    if(fd == -1) {
        fprintf(stderr, "Cannot find webcam's file descriptor "
                        "- this should not happen!\n");
        exit(3);
    }
    closedir(dir);

    v4l2_streamparm streamparm;
    memset(&streamparm, 0, sizeof(streamparm)); // the driver reads this struct, so zero it first
    streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if(xioctl(fd, VIDIOC_G_PARM, &streamparm) < 0) {
        fprintf(stderr, "Error getting stream parameters!\n");
        exit(4);
    }
    if(!(streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME)) {
        fprintf(stderr, "Device does not support setting frame rate!");
        exit(5);
    }
    streamparm.parm.capture.timeperframe.numerator = 1;
    streamparm.parm.capture.timeperframe.denominator = fps;
    if(xioctl(fd, VIDIOC_S_PARM, &streamparm) < 0) {
        fprintf(stderr, "Error setting device framerate!\n");
    }
    return cap;
}
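A possible call site for v4l2cap(); the device index, geometry, and frame rate below are placeholder values, not taken from the original project:

// Illustrative usage only.
int main() {
    VideoCapture cap = v4l2cap(0, 640, 480, 30);
    Mat frame;
    while (cap.read(frame)) {
        imshow("preview", frame);
        if (waitKey(1) >= 0) break;
    }
    return 0;
}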
Example #12
CameraVideo::CameraVideo(vector<string> videoList, bool verbose):mVideoID(0), mFrameWidth(0), mFrameHeight(0), mReadDataStatus(false){

    mVideoList = videoList;

    // Open the video file for reading.
    if(mVideoList.size()>0)
        mCap = VideoCapture(videoList.front());
    else 
        throw "No video path in input.";

    mExposureAvailable = false;
    mGainAvailable = false;
    mInputDeviceType = VIDEO;
    mVerbose = verbose;

}
Example #13
OpenCVCapture::OpenCVCapture( int32_t cam, int32_t rows, int32_t cols ): _isError(false){
	LOGI_OCV("OpenCVCapture constructor begin");

	_cv = VideoCapture(CV_CAP_ANDROID + cam);
	if(_cv.isOpened()){
		LOGI_OCV("OpenCVCapture created");
		_cv.set(CV_CAP_PROP_FRAME_HEIGHT, rows);
		_cv.set(CV_CAP_PROP_FRAME_WIDTH,  cols);

	}else{
		_isError = true;
		LOGI_OCV("OpenCVCapture can't create");
	}

	LOGI_OCV("OpenCVCapture constructor end");
}
Example #14
bool CCapturador::tryCamera(int device)
{
	if (!m_VideoCapture.isOpened())  // check if we succeeded
		m_VideoCapture = VideoCapture(device);
	if (!m_VideoCapture.isOpened())
		return false;
	char key = 0;
	Mat frame;
	while (key==-1||key==0)
	{
		m_VideoCapture >> frame;
		if (!frame.empty())
			imshow("Camara", frame);
		key = cvWaitKey(30);
	}
	cvvDestroyWindow("Camara");
	return true;
}
Example #15
videoThread::videoThread(Mat & outputMat):outMat(outputMat)
{
    cap = VideoCapture(0);
    cap.set(CV_CAP_PROP_FRAME_WIDTH,640);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT,480);
    runFlag = 1;
    detect = new faceDetector();
    objectDetect = new moveObjDetector();
    faceRec = new faceRecognition();

    objMatPtr = 0;
    collectMode = false;
    FINDBALL = false;
    showMessage = "Unknown person";

    findCenter = Point(0,0);
    detectH = detectS = detectV = 0;
}
Example #16
int ImageAcquisition::openCamDevice(int ID, int w, int h){

	cap = VideoCapture(ID); // open the default camera
	if(!cap.isOpened()){  // check if we succeeded
		cout << "Error opening the device" << endl;
		return -1;
	}

	cap.set(CV_CAP_PROP_FRAME_WIDTH,w);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT,h);
	cap.set(CV_CAP_PROP_POS_FRAMES,4);

	pthread_t grab_thread;
	pthread_create( &grab_thread, NULL, grab_function, (void*) &cap);

	return 0;

}
Example #17
void vision::run(){
    saved = imread(path_image.c_str());
    in = saved.clone();

    lbl_input->setPixmap(QPixmap::fromImage(mat2Image( Mat(480, 770, CV_8UC3, Scalar(130, 70, 40)) )));

    while(run_it){
        if(vision_reception){
            if(device_used == CAMERA){
                if(cap.isOpened()){
                    cap >> in;
                }else{
                    cap.release();
                    cap = VideoCapture(id_camera);
                    cap >> in;
                }
            }else
            if(device_used == IMAGE){
Example #18
/**
 * Temporal smoothing for a colored picture
 */
Mat temporalSmoothingColor(String filename){

    /*Result to return*/
    Mat background;
    /*First ten pictures of the video*/
    Mat stockage[10];
    /** Video*/
    VideoCapture vc = VideoCapture(filename);

    /*Acquiring the ten pictures*/
    for(int i = 0; i< 10; i++){
        vc >> stockage[i];
    }
    background = Mat(stockage[0].size(), stockage[0].type());
	
	double h = 0.1;
    double res0, res1, res2;
    int lignes = background.rows;
    int colonnes = background.cols;
    int pas = background.step;
	
    /*Creation of a picture where each pixel is the average value of the matching pixel in the ten pictures*/ 
    for(int x = 0; x < lignes; x++){
        for(int y = 0; y < colonnes; y ++){

            res0 = res1 = res2 = 0;
            for (int u = 0; u < 10; u++)
            {
				//res = res + (h*(stockage[u].at<Vec3b>(x, y)[k]));
				res0 += stockage[u].data[x*pas+y*3+0];
				res1 += stockage[u].data[x*pas+y*3+1];
				res2 += stockage[u].data[x*pas+y*3+2];
            }
            background.data[x*pas+y*3+0] = (unsigned char)(res0 * h);
            background.data[x*pas+y*3+1] = (unsigned char)(res1 * h);
            background.data[x*pas+y*3+2] = (unsigned char)(res2 * h);

        }
    }
    vc.release();
    //spatialSmoothingAvgColor(background, 1);
    return background;
}
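The same ten-frame average can be written more compactly with cv::accumulate, which keeps a floating-point running sum and avoids hand indexing of .data. A sketch under the same assumption that the video yields at least ten frames:

Mat temporalSmoothingColorAccum(String filename){
    VideoCapture vc = VideoCapture(filename);
    Mat frame, acc;
    for(int i = 0; i < 10 && vc.read(frame); i++){
        if(acc.empty())
            acc = Mat::zeros(frame.size(), CV_32FC3);
        accumulate(frame, acc); // per-pixel running sum in float
    }
    Mat background;
    acc.convertTo(background, CV_8UC3, 0.1); // scale by h = 0.1 and return to 8-bit
    vc.release();
    return background;
}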
Example #19
/**
 * Temporal smoothing for a grayscale picture
 */
Mat temporalSmoothing(String filename){

    /*Result to return*/
    Mat background;
    /*First ten pictures of the video*/
    Mat stockage[10];
    
    Mat temp;
    /** Video*/
    VideoCapture vc = VideoCapture(filename);

    /*Acquiring the ten pictures*/
    for(int i = 0; i< 10; i++){
        vc >> temp;
		/*Switching from a colored picture to a greyscale one*/
        cvtColor(temp, stockage[i], CV_BGR2GRAY);
    }
    background = Mat(stockage[0].size(), stockage[0].type());
	
	double h = 0.1;
    double res;
    int lignes = background.rows;
    int colonnes = background.cols;
    int pas = background.step;
	
    /*Creation of a picture where each pixel is the average value of the matching pixel in the ten pictures*/ 
    for(int x = 0; x < lignes; x++){
        for(int y = 0; y < colonnes; y ++){

            res = 0;
            for (int u = 0; u < 10; u++)
            {
				res += stockage[u].data[x*pas+y];
            }
            background.data[x*pas+y] = (unsigned char)(res * h);

        }
    }
    vc.release();
    //spatialSmoothingAvgColor(background, 1);
    return background;
}
Example #20
CHandGestureRecognitionSystemDlg::CHandGestureRecognitionSystemDlg(CWnd* pParent /*=NULL*/)
    : CDialogEx(CHandGestureRecognitionSystemDlg::IDD, pParent)
    , image_height(240)
    , image_width(320)
    , image_interval(20)
    , point_begin(image_width / 2, image_height / 2)
    , point_end(image_width / 2, image_height / 2)
    , vector_angle(0)
    , vector_length(0)
    , vector_threshold(30)
{
    m_hIcon = AfxGetApp()->LoadIcon(IDR_MAINFRAME);
    videocapture = VideoCapture(0);
    image_camera = Mat(image_height, image_width, CV_8UC3);
    image_preprocess = Mat(image_height, image_width, CV_8UC3);
    image_segment = Mat(image_height, image_width, CV_8UC3);
    image_feature = Mat(image_height, image_width, CV_8UC3);
    image_recognition = Mat(image_height, image_width, CV_8UC3);
    image_background = Mat(image_height, image_width, CV_8UC3);
}
Example #21
geyeview::geyeview(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::geyeview)
{
    ui->setupUi(this);

    // create video-from-webcam capture object
    ge_cap = VideoCapture(0);

    // create opencv mat image using single frame capture
    ge_cap >> ge_img;

    // create qt image container using mat image data
    ge_img_qt = QImage(ge_img.data,
                       ge_img.size().width,
                       ge_img.size().height,
                       QImage::Format_RGB888);

    // Webcam frame-grab timer
    ge_cap_timer = new QTimer(this);
    connect(ge_cap_timer,SIGNAL(timeout()),this,SLOT(geProcess()));
    ge_cap_timer->start(GRAB_MS);
}
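One caveat with wrapping the capture buffer directly as above: OpenCV frames are in BGR order, while QImage::Format_RGB888 expects RGB, so red and blue appear swapped unless the frame is converted first. A sketch of the usual conversion, reusing the names from the snippet:

// Convert BGR (OpenCV order) to RGB before wrapping in a QImage.
// QImage only references the buffer, so rgb must outlive the QImage.
cv::Mat rgb;
cv::cvtColor(ge_img, rgb, CV_BGR2RGB);
ge_img_qt = QImage(rgb.data, rgb.cols, rgb.rows,
                   static_cast<int>(rgb.step), QImage::Format_RGB888);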
Example #22
bool initCamera()
{
    int size_w = 320;
    int size_h = 240;

    cap = VideoCapture(-1); // -1 lets OpenCV autodetect the first available camera

    if (!cap.isOpened())
    {
        cout << "ERRO Camera cannot be opened" << endl;


        return false;
    }else{
        printf("Camera is opened\n");
    }

    cap.set(CV_CAP_PROP_FRAME_WIDTH, size_w);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, size_h);

    isFakeMode = false;

    return true;
}
Example #23
void WebcamHandler::run()
{

	// initialize webcam
	VideoCapture cap = VideoCapture(0);
	cap.set(CV_CAP_PROP_FRAME_WIDTH, m_frameWidth);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, m_frameHeight);	

	// initialize window
	namedWindow("Settings", CV_WINDOW_AUTOSIZE);
	namedWindow("FaceRepair", CV_WINDOW_NORMAL);
	cvSetWindowProperty("FaceRepair", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);

	cvWaitKey(1000);

	float* hidden;
	float* visible;

	while (m_loop)
	{
		// read a frame; skip this iteration if it fails
		Mat frame;
		if (!cap.read(frame)) continue;
		flip(frame, frame, 1);

		// take subimage at faceArea
		Mat subimage;
		frame(*m_faceArea).copyTo(subimage);
		Mat subimageHSV;
		cvtColor(subimage, subimageHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV

		// detect color
		Mat mask;
		inRange(subimageHSV, *m_detectionColorMin, *m_detectionColorMax, mask);
		erode(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
		dilate(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(15, 15)));
		Mat invertedMask = 255 - mask;

		// scale to rbm input size
		Size size = Size(m_edgeLength, m_edgeLength);
		Mat scaledSubimage;	
		resize(subimage, scaledSubimage, size, 0.0, 0.0, INTER_LINEAR);
		Mat scaledMask;
		resize(mask, scaledMask, size, 0.0, 0.0, INTER_NEAREST);
		Mat invertedScaledMask = 255 - scaledMask;

		// calc mean rgb of preserved area
		Scalar bgr = mean(scaledSubimage, invertedScaledMask);

		// set mean rgb at reconstructionArea
		scaledSubimage.setTo(bgr, scaledMask);

		// subimage to normalized float array
		visible = matToNormalizedFloatArrayWithBias(&scaledSubimage);

		// process RBMs
		hidden = m_rbm1000->runHidden(visible, 1);
		delete visible;
		hidden[0] = 1;
		visible = m_rbm1000->runVisible(hidden, 1);
		delete hidden;
		visible[0] = 1;
		resetPreservedArea(&scaledSubimage, &invertedScaledMask, visible);

		hidden = m_rbm1500->runHidden(visible, 1);
		delete visible;
		hidden[0] = 1;
		visible = m_rbm1500->runVisible(hidden, 1);
		delete hidden;
		visible[0] = 1;
		resetPreservedArea(&scaledSubimage, &invertedScaledMask, visible);

		hidden = m_rbm2000->runHidden(visible, 1);
		delete visible;
		hidden[0] = 1;
		visible = m_rbm2000->runVisible(hidden, 1);
		delete hidden;

		// normalized float array to subimage
		normalizedFloatArrayToMatWithoutBias(visible, &scaledSubimage);

		// scale to original faceArea size
		Mat result;
		size = Size(m_faceArea->width, m_faceArea->height);
		resize(scaledSubimage, result, size, 0.0, 0.0, INTER_CUBIC);

		// reset pixels of preserved area in native resolution
		subimage.copyTo(result, invertedMask);

		// create fullscreen image
		Mat fs;
		frame.copyTo(fs);
		result.copyTo(fs(*m_faceArea));
		flip(fs, fs, 1);
		
		// maybe not necessary
		//result.copyTo(frame(*m_faceArea));
		
		// paint visualizations for settings image
		rectangle(frame, *m_faceArea, Scalar(0, 255, 0));
		Point* eyePositions = calculateEyePositions(m_faceArea, m_relativeEyePositionX, m_relativeEyePositionY);
		circle(frame, eyePositions[0], 4, Scalar(255, 255, 0));
		circle(frame, eyePositions[1], 4, Scalar(255, 255, 0));
		delete eyePositions;

		// show frames
		imshow("Settings", frame);
		imshow("FaceRepair", fs);
		
		// check keyboard input
		checkKeys();
	}
	// terminate webcam
	cap.release();
}
Example #24
void CameraManager::bindCamera(int port) {
    _capture = VideoCapture(port);
}
Example #25
MyInputImgStruct::MyInputImgStruct(int devIndex){
	camSrc = devIndex; //holds webcam position for external use
	vidCap = VideoCapture(devIndex); // creates video capture object
	                                //from webcam position devIndex
}
Example #26
InputProcessing::InputProcessing(int inputType, bool DEBUG_MODE) :
    faceCascade(CascadeClassifier()),
    inputType(inputType),
    DEBUG_MODE(DEBUG_MODE),
    eyeCascadeGlasses(CascadeClassifier()),
    eyeCascade(CascadeClassifier())

{

    printf("loading \n");
    if (!faceCascade.load("cascades\\haarcascade_frontalface_alt.xml")) {
        printf("--(!)File not found faceCascade\n");
        exit(-11);
    }
    if (!eyeCascadeGlasses.load("cascades\\haarcascade_eye_tree_eyeglasses.xml")) {
        printf("--(!)File not found eyeCascadeGlasses\n");
        exit(-12);
    }
    if (!eyeCascade.load("cascades\\haarcascade_eye.xml")) {
        printf("--(!)File not found eyeCascade\n");
        exit(-13);
    }
    if (inputType < 1 || inputType > 4) {
        printf("--(!)Input type %i not specified\n", inputType);
        exit(-15);
    }
    else if (inputType == INPUT_TYPE_CAMERA_INPUT) {
        // set default camera
        cap = VideoCapture(0);
        if (!cap.isOpened()) {
            printf("--(!)Camera 0 not available\n");
        }
    }
    else if (inputType == INPUT_TYPE_GI4E_DB) {
        /*
        load ground truth

        Format for GI4E image_labels.txt:
        xxx_yy.png	x1 y1 x2 y2 x3 y3 x4 y4 x5 y5 x6 y6
        The first point (x1,y1) is the external corner of the left user's eye. The second point is the centre of the left iris.
        The third one is the internal corner of the left eye. The other three points are internal corner, iris centre and
        external corner of the right eye.

        */

        ifstream file("../GI4E/labels/image_labels.txt");
        string line;
        if (file.is_open()) {
            while (getline(file, line)) {
                try {
                    istringstream iss(line);
                    string filename, x1, y1, x2, y2, x3, y3, x4, y4, x5, y5, x6, y6;
                    getline(iss, filename, '\t');
                    getline(iss, x1, '\t');
                    getline(iss, y1, '\t');
                    getline(iss, x2, '\t');
                    getline(iss, y2, '\t');
                    getline(iss, x3, '\t');
                    getline(iss, y3, '\t');
                    getline(iss, x4, '\t');
                    getline(iss, y4, '\t');
                    getline(iss, x5, '\t');
                    getline(iss, y5, '\t');
                    getline(iss, x6, '\t');
                    getline(iss, y6, '\t');
                    vector<Point2f> v;

                    v.push_back(Point2f(stof(x1), stof(y1)));
                    v.push_back(Point2f(stof(x2), stof(y2)));
                    v.push_back(Point2f(stof(x3), stof(y3)));
                    v.push_back(Point2f(stof(x4), stof(y4)));
                    v.push_back(Point2f(stof(x5), stof(y5)));
                    v.push_back(Point2f(stof(x6), stof(y6)));
                    v.shrink_to_fit();
                    labels.push_back(v);
                }
                catch (const std::exception &e) { // stof() throws std::invalid_argument, not cv::Exception
                    printf("--(!)Error while parsing /GI4E/labels/image_labels.txt\n");
                }
            }
            labels.shrink_to_fit();
            file.close();
        }
    }

}
Example #27
Visao::Visao(){    
    cam = VideoCapture(0);
    variacao = 0.2;
    variacaoH = 4;    
    calibrar = false;
}    
Example #28
#include "OpenCVItemProcessing.h"

std::string OpenCVItemProcessing::windowName	= "Video Feedback";
cv::Mat OpenCVItemProcessing::videoImage		= Mat();
cv::VideoCapture OpenCVItemProcessing::camera	= VideoCapture(0);
int OpenCVItemProcessing::cameraIdx				= 0;

static int ref0 = 1;

static int r_max = 255;
static int r_min = 0;
static int g_max = 255;
static int g_min = 0;
static int b_max = 255;
static int b_min = 0;

static int exposure = 12;

typedef cv::Point3_<uint8_t> Pixel;

int OpenCVItemProcessing::Run(int argc, char** argv)
{
	if (!Startup()) return -1;
	else
	{
		while (Loop()){}

		Cleanup();

		return 0;
	}
}
Example #29
bool CCapturador::CapturePatterns(int time,int device,int posX,int posY,bool useComp)
{
	m_vCaptures.clear();
	
	//VideoCapture cap(0); // open the default camera
	if (!m_VideoCapture.isOpened())  // check if we succeeded
		m_VideoCapture = VideoCapture(device);
	if (!m_VideoCapture.isOpened())
		return false;
		
	//VideoCapture cap(0);
	//if (!cap.isOpened())
	//	return -1;
	bool bMakeCapture = false;
	int nPatterns = 0;
	//namedWindow("Camera", 1);
	//cvWaitKey(500);
	//m_VideoCapture >> m_mTextura;
	//imwrite("Textura.bmp", 0);
	namedWindow("Patrones");

	HWND win_handle = FindWindow(0, "Patrones");
	if (!win_handle)
	{
		printf("Failed FindWindow\n");
	}

	// Resize
	unsigned int flags = (SWP_SHOWWINDOW | SWP_NOSIZE | SWP_NOMOVE | SWP_NOZORDER);
	flags &= ~SWP_NOSIZE;
	unsigned int x = posX;
	unsigned int y = posY;
	printf("x = %d y = %d", x, y);
	unsigned int w = m_Options->m_nWidth;
	unsigned int h = m_Options->m_nHeight;
	SetWindowPos(win_handle, HWND_TOP, x, y, w, h, flags);

	// Borderless
	SetWindowLong(win_handle, GWL_STYLE, GetWindowLong(win_handle, GWL_EXSTYLE) | WS_EX_TOPMOST);
	ShowWindow(win_handle, SW_SHOW);
	cvMoveWindow("Patrones", posX, posY);
	cvWaitKey(2000);
	auto A = GetTickCount();
	auto B = GetTickCount();
	for (int i = 0;;)
	{
		imshow("Patrones", m_vPatterns[i]);
		Mat frame;
		m_VideoCapture >> frame;
		//imshow("Camera", frame);
		B = GetTickCount();
		int C = B - A;
		if (C>time || waitKey(30) >= 0)
		{
			if (!frame.empty())
			{
				if (useComp)
				{
					i++;
					Mat capture = frame.clone();
					Mat gray;
					cv::cvtColor(capture, gray, CV_BGR2GRAY);
					m_vCaptures.push_back(gray);
					if (++nPatterns >= m_nPatterns)
					{
						m_mTextura = capture.clone();
						break;
					}
				}
				else
				{
					i += 2;
					Mat capture = frame.clone();
					Mat gray;
					cv::cvtColor(capture, gray, CV_BGR2GRAY);
					m_vCaptures.push_back(gray);
					nPatterns += 2;
					if (nPatterns >= m_nPatterns)
						break;
				}
			}
			else
				printf("Error: no capture info.\n");
			A = GetTickCount();
		};
	}
	cout << "Patrones capturados." << endl;
	cvDestroyWindow("Patrones");
	//cvDestroyWindow("Camera");
	return true;
}
Example #30
Camera::Camera(int numCamera)
{
	cap = VideoCapture(numCamera);
}