Exemplo n.º 1
0
// Produce a single-channel image from a colour frame.
// NOTE(review): this takes channel index 2 of the split (the RED plane,
// given OpenCV's BGR ordering) rather than computing true luminance —
// presumably a deliberate shortcut; confirm against callers.
void getGray(cv::Mat frame, cv::Mat *gray) {
    std::vector<cv::Mat> planes(3);
    cv::split(frame, planes);
    *gray = planes[2];
}
Exemplo n.º 2
0
// Build the per-pixel feature stack used by PBAS background subtraction.
//
// For a 3-channel input the resulting features are, in order:
//   [0..2] gradient magnitudes of channels 0..2 (CV_32F)
//   [3..5] the raw channels 0..2
// For a single-channel input:
//   [0] gradient magnitude, [1] a deep copy of the input image.
//
// @param feature     output vector; any previous contents are discarded
// @param inputImage  source frame (1 or 3 channels)
void PBAS::calculateFeatures(std::vector<cv::Mat>* feature, cv::Mat* inputImage)
{
    if(!feature->empty())
        feature->clear();

    cv::Mat mag[3], dir;

    if(inputImage->channels() == 3)
    {
        std::vector<cv::Mat> rgbChannels(3);
        cv::split(*inputImage, rgbChannels);

        for(int l = 0; l < 3; ++l)
        {
            // First-order derivatives (3x3 Sobel) in x and y.
            cv::Sobel(rgbChannels.at(l), sobelX, CV_32F, 1, 0, 3, 1, 0.0);
            cv::Sobel(rgbChannels.at(l), sobelY, CV_32F, 0, 1, 3, 1, 0.0);

            // L2 norm and direction of the gradient (direction is unused).
            cv::cartToPolar(sobelX, sobelY, mag[l], dir, true);
            feature->push_back(mag[l]);
            // Reset the member buffers so stale gradients do not linger
            // in the object between calls (preserves original behavior).
            sobelX.release();
            sobelY.release();
        }

        feature->push_back(rgbChannels.at(0));
        feature->push_back(rgbChannels.at(1));
        feature->push_back(rgbChannels.at(2));
        // No manual release needed for the locals: cv::Mat is reference
        // counted and the feature vector now shares ownership of the data.
    }
    else
    {
        cv::Sobel(*inputImage, sobelX, CV_32F, 1, 0, 3, 1, 0.0);
        cv::Sobel(*inputImage, sobelY, CV_32F, 0, 1, 3, 1, 0.0);

        // L2 norm and direction of the gradient (direction is unused).
        cv::cartToPolar(sobelX, sobelY, mag[0], dir, true);
        feature->push_back(mag[0]);

        // Deep copy so the feature does not alias the caller's image.
        feature->push_back(inputImage->clone());
    }
    // mag[] and dir release automatically via cv::Mat RAII.
}
Exemplo n.º 3
0
/**
 * @function detectAndDisplay
 */
/**
 * Detect the single biggest face in `frame`, locate the eyes, run face
 * recognition on the cropped/aligned face, and display the matching
 * database image.
 *
 * @param frame  input BGR frame
 * @return the recognised index (offset by startnum) on success; -1 when
 *         no face is found, CropFace flags an error, or the crop is empty.
 */
int detectAndDisplay( cv::Mat frame ) {
  std::vector<cv::Rect> faces;

  // Channel 2 of a BGR split (the red plane) stands in for grayscale.
  std::vector<cv::Mat> rgbChannels(3);
  cv::split(frame, rgbChannels);
  cv::Mat frame_gray = rgbChannels[2];

  // Only the biggest face, at least 150x150 pixels.
  face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE|CV_HAAR_FIND_BIGGEST_OBJECT, cv::Size(150, 150) );

  for( size_t i = 0; i < faces.size(); i++ )
  {
    rectangle(debugImage, faces[i], 1234);
  }

  if (faces.size() > 0) {
    findEyes(frame_gray, faces[0]);

    if (frame_gray.data) {
      frame_gray.convertTo(frame_gray, CV_32FC1);
      cv::Mat target = CropFace(frame_gray, leftPupil, rightPupil, offset_pct, dest_sz);

      if (errorflag) {
        return -1;
      }
      if (target.data) {
        // Histogram equalisation requires 8-bit data; convert back to
        // float afterwards for the recognition query.
        target.convertTo(target, CV_8UC1);
        equalizeHist(target, target);
        target.convertTo(target, CV_32FC1);

        int index = query(target, ef);
        index += startnum;

        // BUGFIX: the original formatted `index` into char temp[3] with
        // sprintf, overflowing the buffer for any index outside 0..99
        // (or negative). std::to_string is safe for the full int range.
        std::string s = std::to_string(index);
        std::cout << "face recognized, index : " << index << std::endl;

        imshow("result" + s, ef.getdb()[index - startnum]);
        waitKey(0);
        destroyWindow("result" + s);
        return index;
      }
    }
  }
  return -1;
}
Exemplo n.º 4
0
    // Detect faces in the given frame, outline each face together with its
    // eyes and mouth, and show the annotated frame in the output window.
    void Test::detectAndDisplay(Mat frame)
    {
        std::vector<Rect> detectedFaces;

        // Channel 2 of a BGR split (the red plane) is used here as the
        // grayscale image, matching the other detection helpers.
        std::vector<cv::Mat> bgrPlanes(3);
        cv::split(frame, bgrPlanes);
        cv::Mat frame_gray = bgrPlanes[2];

        // Restrict detection to the single biggest face, >= 150x150 px.
        face_cascade.detectMultiScale(frame_gray, detectedFaces, 1.1, 2,
                                      0 | CV_HAAR_SCALE_IMAGE | CV_HAAR_FIND_BIGGEST_OBJECT,
                                      cv::Size(150, 150));

        for (size_t i = 0; i < detectedFaces.size(); i++)
        {
            const Rect& face = detectedFaces[i];

            // Mark the face with an ellipse and a one-pixel centre dot.
            Point faceCenter(face.x + face.width * 0.5, face.y + face.height * 0.5);
            ellipse(frame, faceCenter, Size(face.width * 0.5, face.height * 0.5),
                    0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
            rectangle(frame, faceCenter, faceCenter, CV_RGB(0, 255, 0), 1);

            Mat faceROI = frame_gray(face);
            std::vector<Rect> eyeRects;
            std::vector<Rect> mouthRects;

            //-- Within each face, detect and outline the eyes.
            eyes_cascade.detectMultiScale(faceROI, eyeRects, 1.1, 2,
                                          0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

            for (size_t j = 0; j < eyeRects.size(); j++)
            {
                const Rect& eye = eyeRects[j];
                rectangle(frame,
                          cvPoint(face.x + eye.x, face.y + eye.y),
                          cvPoint(face.x + eye.x + eye.width, face.y + eye.y + eye.height),
                          CV_RGB(0, 0, 255),
                          1, 8, 0);
            }

            //-- Within each face, detect the mouth.
            performFeatureDetection(mouthRects, detectedFaces, faceROI, mouth_cascade, frame, i);
        }

        //-- Show what you got
        imshow(window_name, frame);
    }
Exemplo n.º 5
0
// Locate the largest face in `frame`, outline every detected face, and
// delegate to getEyeVectors to work out where the eyes are pointing.
// NOTE(review): despite the original comment, this function returns
// nothing — gaze results are produced through getEyeVectors' side effects.
void getGazePosition(Mat &frame){
	std::vector<Rect> faces;

	// Channel index 2 of a BGR split is the RED plane (the original
	// comment said "blue"); it is used as a low-noise grayscale stand-in.
	std::vector<Mat> planes(3);
	split(frame, planes);
	Mat frame_gray = planes[2];

	// Detect only the biggest face, at least 150x150 pixels.
	face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2,
			0|CV_HAAR_SCALE_IMAGE|CV_HAAR_FIND_BIGGEST_OBJECT, Size(150, 150));

	// Outline every detected face on the frame.
	for(size_t i = 0; i < faces.size(); ++i)
		rectangle(frame, faces[i], 1234);

	// Process eyes in the first (biggest) face only.
	if(!faces.empty())
		getEyeVectors(frame, frame_gray, faces[0]);
}
Exemplo n.º 6
0
/**
 * @function detectAndDisplay
 * Detect the single biggest face in `frame` and run eye localisation on it.
 *
 * @param frame  input BGR frame
 * @return the result of findEyes for the biggest detected face, or an
 *         empty cv::Mat when no face is found.
 */
cv::Mat detectAndDisplay( cv::Mat frame ) {
  std::vector<cv::Rect> faces;

  // Channel 2 of a BGR split (the red plane) stands in for grayscale.
  std::vector<cv::Mat> rgbChannels(3);
  cv::split(frame, rgbChannels);
  cv::Mat frame_gray = rgbChannels[2];

  //-- Detect only the biggest face, at least 150x150 pixels.
  face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE|CV_HAAR_FIND_BIGGEST_OBJECT, cv::Size(150, 150) );

  for( size_t i = 0; i < faces.size(); i++ )
  {
    rectangle(debugImage, faces[i], 1234);
  }

  if (faces.size() > 0) {
    return findEyes(frame_gray, faces[0]);
  }
  // BUGFIX: the original fell off the end of a non-void function when no
  // face was detected — undefined behaviour. Return an empty Mat instead
  // (callers can check .empty() / .data as they already do elsewhere).
  return cv::Mat();
}
Exemplo n.º 7
0
static int runFusibile (int argc,
                        char **argv,
                        AlgorithmParameters &algParameters
                       )
{
    InputFiles inputFiles;
    string ext = ".png";

    string results_folder = "results/";

    const char* results_folder_opt     = "-input_folder";
    const char* p_input_folder_opt = "-p_folder";
    const char* krt_file_opt = "-krt_file";
    const char* images_input_folder_opt = "-images_folder";
    const char* gt_opt = "-gt";
    const char* gt_nocc_opt = "-gt_nocc";
    const char* pmvs_folder_opt = "--pmvs_folder";
    const char* remove_black_background_opt = "-remove_black_background";
    //read in arguments
    for ( int i = 1; i < argc; i++ )
    {
        if ( strcmp ( argv[i], results_folder_opt ) == 0 ){
            results_folder = argv[++i];
            cout << "input folder is " << results_folder << endl;

        }else if ( strcmp ( argv[i], p_input_folder_opt ) == 0 ){
            inputFiles.p_folder = argv[++i];
        }
        else if ( strcmp ( argv[i], krt_file_opt ) == 0 )
            inputFiles.krt_file = argv[++i];
        else if ( strcmp ( argv[i], images_input_folder_opt ) == 0 ){
            inputFiles.images_folder = argv[++i];

        }else if ( strcmp ( argv[i], gt_opt ) == 0 ){
            inputFiles.gt_filename = argv[++i];

        }else if ( strcmp ( argv[i], gt_nocc_opt ) == 0 ){
            inputFiles.gt_nocc_filename = argv[++i];
        }
        else if ( strncmp ( argv[i], pmvs_folder_opt, strlen ( pmvs_folder_opt ) ) == 0 ) {
            inputFiles.pmvs_folder = argv[++i];
        }
        else if ( strcmp ( argv[i], remove_black_background_opt ) == 0 )
            algParameters.remove_black_background = true;
    }

    if (inputFiles.pmvs_folder.size()>0) {
        inputFiles.images_folder = inputFiles.pmvs_folder + "/visualize/";
        inputFiles.p_folder = inputFiles.pmvs_folder + "/txt/";
    }
    cout <<"image folder is " << inputFiles.images_folder << endl;
    cout <<"p folder is " << inputFiles.images_folder << endl;
    cout <<"pmvs folder is " << inputFiles.pmvs_folder << endl;

    GTcheckParameters gtParameters;

    gtParameters.dispTolGT = 0.1f;
    gtParameters.dispTolGT2 = 0.02f;
    gtParameters.divFactor = 1.0f;
    // create folder to store result images
    time_t timeObj;
    time ( &timeObj );
    tm *pTime = localtime ( &timeObj );

    vector <Mat_<Vec3f> > view_vectors;


    time(&timeObj);
    pTime = localtime(&timeObj);

    char output_folder[256];
    sprintf(output_folder, "%s/consistencyCheck-%04d%02d%02d-%02d%02d%02d/",results_folder.c_str(), pTime->tm_year+1900, pTime->tm_mon+1,pTime->tm_mday,pTime->tm_hour, pTime->tm_min, pTime->tm_sec);
#if defined(_WIN32)
    _mkdir(output_folder);
#else
    mkdir(output_folder, 0777);
#endif

    vector<string> subfolders;
    get_subfolders(results_folder.c_str(), subfolders);
    std::sort(subfolders.begin(), subfolders.end());

    vector< Mat_<Vec3b> > warpedImages;
    vector< Mat_<Vec3b> > warpedImages_inverse;
    //vector< Mat_<float> > depthMaps;
    vector< Mat_<float> > updateMaps;
    vector< Mat_<Vec3f> > updateNormals;
    vector< Mat_<float> > depthMapConsistent;
    vector< Mat_<Vec3f> > normalsConsistent;
    vector< Mat_<Vec3f> > groundTruthNormals;
    vector< Mat_<uint8_t> > valid;


    map< int,string> consideredIds;
    for(size_t i=0;i<subfolders.size();i++) {
        //make sure that it has the right format (DATE_TIME_INDEX)
        size_t n = std::count(subfolders[i].begin(), subfolders[i].end(), '_');
        if(n < 2)
            continue;
        if (subfolders[i][0] != '2')
            continue;

        //get index
        //unsigned found = subfolders[i].find_last_of("_");
        //find second index
        unsigned posFirst = subfolders[i].find_first_of("_") +1;
        unsigned found = subfolders[i].substr(posFirst).find_first_of("_") + posFirst +1;
        string id_string = subfolders[i].substr(found);
        //InputData dat;

        //consideredIds.push_back(id_string);
        consideredIds.insert(pair<int,string>(i,id_string));
        //cout << "id_string is " << id_string << endl;
        //cout << "i is " << i << endl;
        //char outputPath[256];
        //sprintf(outputPath, "%s.png", id_string);

        if( access( (inputFiles.images_folder + id_string + ".png").c_str(), R_OK ) != -1 )
            inputFiles.img_filenames.push_back((id_string + ".png"));
        else if( access( (inputFiles.images_folder + id_string + ".jpg").c_str(), R_OK ) != -1 )
            inputFiles.img_filenames.push_back((id_string + ".jpg"));
        else if( access( (inputFiles.images_folder + id_string + ".ppm").c_str(), R_OK ) != -1 )
            inputFiles.img_filenames.push_back((id_string + ".ppm"));
    }
    size_t numImages = inputFiles.img_filenames.size ();
    cout << "numImages is " << numImages << endl;
    cout << "img_filenames is " << inputFiles.img_filenames.size() << endl;
    algParameters.num_img_processed = min ( ( int ) numImages, algParameters.num_img_processed );

    vector<Mat_<Vec3b> > img_color; // imgLeft_color, imgRight_color;
    vector<Mat_<uint8_t> > img_grayscale;
    for ( size_t i = 0; i < numImages; i++ ) {
        //printf ( "Opening image %ld: %s\n", i, ( inputFiles.images_folder + inputFiles.img_filenames[i] ).c_str () );
        img_grayscale.push_back ( imread ( ( inputFiles.images_folder + inputFiles.img_filenames[i] ), IMREAD_GRAYSCALE ) );
        if ( algParameters.color_processing ) {
            img_color.push_back ( imread ( ( inputFiles.images_folder + inputFiles.img_filenames[i] ), IMREAD_COLOR ) );
        }

        if ( img_grayscale[i].rows == 0 ) {
            printf ( "Image seems to be invalid\n" );
            return -1;
        }
    }

    size_t avail;
    size_t total;
    cudaMemGetInfo( &avail, &total );
    size_t used = total - avail;
    printf("Device memory used: %fMB\n", used/1000000.0f);

    GlobalState *gs = new GlobalState;
	gs->cameras = new CameraParameters_cu;
	gs->pc = new PointCloud;
    cudaMemGetInfo( &avail, &total );
    used = total - avail;
    printf("Device memory used: %fMB\n", used/1000000.0f);

    uint32_t rows = img_grayscale[0].rows;
    uint32_t cols = img_grayscale[0].cols;

    CameraParameters camParams = getCameraParameters (*(gs->cameras),
                                                      inputFiles,algParameters.depthMin,
                                                      algParameters.depthMax,
                                                      algParameters.cam_scale,
                                                      false);
    printf("Camera size is %lu\n", camParams.cameras.size());

    for ( int i = 0; i < algParameters.num_img_processed; i++ ) {
        algParameters.min_disparity = disparityDepthConversion ( camParams.f, camParams.cameras[i].baseline, camParams.cameras[i].depthMax );
        algParameters.max_disparity = disparityDepthConversion ( camParams.f, camParams.cameras[i].baseline, camParams.cameras[i].depthMin );
    }

    selectViews ( camParams, cols, rows, false);
    int numSelViews = camParams.viewSelectionSubset.size ();
    cout << "Selected views: " << numSelViews << endl;
    gs->cameras->viewSelectionSubsetNumber = numSelViews;
    ofstream myfile;
    for ( int i = 0; i < numSelViews; i++ ) {
        cout << camParams.viewSelectionSubset[i] << ", ";
        gs->cameras->viewSelectionSubset[i] = camParams.viewSelectionSubset[i];
    }
    cout << endl;

    vector<InputData> inputData;

    cout << "Reading normals and depth from disk" << endl;
    cout << "Size consideredIds is " << consideredIds.size() << endl;
    for (map<int,string>::iterator it=consideredIds.begin(); it!=consideredIds.end(); ++it){

        //get corresponding camera
        int i = it->first;
        string id = it->second;//consideredIds[i];
        //int id = atoi(id_string.c_str());
        int camIdx = getCameraFromId(id,camParams.cameras);
        //cout << "id is " << id << endl;
        //cout << "camIdx is " << camIdx << endl;
        if(camIdx < 0)// || camIdx == camParams.idRef)
            continue;

        InputData dat;
        dat.id = id;
        dat.camId = camIdx;
        dat.cam = camParams.cameras[camIdx];
        dat.path = results_folder + subfolders[i];
        dat.inputImage = imread((inputFiles.images_folder + id + ext), IMREAD_COLOR);

        //read normal
        cout << "Reading normal " << i << endl;
        readDmbNormal((dat.path + "/normals.dmb").c_str(),dat.normals);

        //read depth
        cout << "Reading disp " << i << endl;
        readDmb((dat.path + "/disp.dmb").c_str(),dat.depthMap);

        //inputData.push_back(move(dat));
        inputData.push_back(dat);

    }
    // run gpu run
    // Init parameters
    gs->params = &algParameters;


    // Init ImageInfo
    //gs->iminfo.cols = img_grayscale[0].cols;
    //gs->iminfo.rows = img_grayscale[0].rows;
    gs->cameras->cols = img_grayscale[0].cols;
    gs->cameras->rows = img_grayscale[0].rows;
    gs->params->cols = img_grayscale[0].cols;
    gs->params->rows = img_grayscale[0].rows;
    gs->resize (img_grayscale.size());
    gs->pc->resize (img_grayscale[0].rows * img_grayscale[0].cols);
	PointCloudList pc_list;
    pc_list.resize (img_grayscale[0].rows * img_grayscale[0].cols);
    pc_list.size=0;
    pc_list.rows = img_grayscale[0].rows;
    pc_list.cols = img_grayscale[0].cols;
    gs->pc->rows = img_grayscale[0].rows;
    gs->pc->cols = img_grayscale[0].cols;

    // Resize lines
    for (size_t i = 0; i<img_grayscale.size(); i++)
    {
        gs->lines[i].resize(img_grayscale[0].rows * img_grayscale[0].cols);
        gs->lines[i].n = img_grayscale[0].rows * img_grayscale[0].cols;
        //gs->lines.s = img_grayscale[0].step[0];
        gs->lines[i].s = img_grayscale[0].cols;
        gs->lines[i].l = img_grayscale[0].cols;
    }

    vector<Mat > img_grayscale_float           (img_grayscale.size());
    vector<Mat > img_color_float               (img_grayscale.size());
    vector<Mat > img_color_float_alpha         (img_grayscale.size());
    vector<Mat > normals_and_depth             (img_grayscale.size());
    vector<Mat_<uint16_t> > img_grayscale_uint (img_grayscale.size());
    for (size_t i = 0; i<img_grayscale.size(); i++)
    {
        //img_grayscale[i].convertTo(img_grayscale_float[i], CV_32FC1, 1.0/255.0); // or CV_32F works (too)
        img_grayscale[i].convertTo(img_grayscale_float[i], CV_32FC1); // or CV_32F works (too)
        img_grayscale[i].convertTo(img_grayscale_uint[i], CV_16UC1); // or CV_32F works (too)
        if(algParameters.color_processing) {
            vector<Mat_<float> > rgbChannels ( 3 );
            img_color_float_alpha[i] = Mat::zeros ( img_grayscale[0].rows, img_grayscale[0].cols, CV_32FC4 );
            img_color[i].convertTo (img_color_float[i], CV_32FC3); // or CV_32F works (too)
            Mat alpha( img_grayscale[0].rows, img_grayscale[0].cols, CV_32FC1 );
            split (img_color_float[i], rgbChannels);
            rgbChannels.push_back( alpha);
            merge (rgbChannels, img_color_float_alpha[i]);
        }
        /* Create vector of normals and disparities */
        vector<Mat_<float> > normal ( 3 );
        normals_and_depth[i] = Mat::zeros ( img_grayscale[0].rows, img_grayscale[0].cols, CV_32FC4 );
        split (inputData[i].normals, normal);
        normal.push_back( inputData[i].depthMap);
        merge (normal, normals_and_depth[i]);

    }
    //int64_t t = getTickCount ();

    // Copy images to texture memory
    if (algParameters.saveTexture) {
        if (algParameters.color_processing)
            addImageToTextureFloatColor (img_color_float_alpha, gs->imgs);
        else
            addImageToTextureFloatGray (img_grayscale_float, gs->imgs);
    }

    addImageToTextureFloatColor (normals_and_depth, gs->normals_depths);

#define pow2(x) ((x)*(x))
#define get_pow2_norm(x,y) (pow2(x)+pow2(y))

    runcuda(*gs, pc_list, numSelViews);
    Mat_<Vec3f> norm0 = Mat::zeros ( img_grayscale[0].rows, img_grayscale[0].cols, CV_32FC3 );
    Mat_<float> distImg;
    char plyFile[256];
    sprintf ( plyFile, "%s/final3d_model.ply", output_folder);
    printf("Writing ply file %s\n", plyFile);
    //storePlyFileAsciiPointCloud ( plyFile, pc_list, inputData[0].cam, distImg);
    storePlyFileBinaryPointCloud ( plyFile, pc_list, distImg);
    char xyzFile[256];
    sprintf ( xyzFile, "%s/final3d_model.xyz", output_folder);
    printf("Writing ply file %s\n", xyzFile);
    //storeXYZPointCloud ( xyzFile, pc_list, inputData[0].cam, distImg);

    return 0;
}