// Code example #1
int detect(int argc, char* argv[]) {

    cout << "Testing L1 started" << endl;
    if(argc<3) {
        cerr << "Usage: HFTrainDetect config.txt 1 [image] [detection]" << endl;
        return -1;
    }

    // read config file
    StructParam param;
    if(!param.loadConfigDetect(argv[1])) {
        cerr << "Could not parse " << argv[1] << endl;
        exit(-1);
    }

    // timer
    timeval start, end;
    double runtime;
    gettimeofday(&start, NULL);

    // load forest
    HFForest_L1 forest(&param);
    forest.loadForest(param.treepath_L1);


    AnnotationData TestD;
    TestD.loadAnnoFile(param.test_file.c_str());

    // detect hypotheses on all images
    for(unsigned int i=0; i<TestD.AnnoData.size(); ++i) {

        // read image
        Mat originImg = imread((param.image_path+"/"+TestD.AnnoData[i].image_name).c_str()); // originImg is UINT8_3Channel
        string depthFileName = param.depth_image_path+"/"+TestD.AnnoData[i].image_name.substr(0,TestD.AnnoData[i].image_name.size()-4)+"_abs_smooth.png";
        Mat depthImg  = imread(depthFileName,CV_LOAD_IMAGE_ANYDEPTH);    // depthImg is UINT16

        if(originImg.empty()) {
            cerr << "Could not read image file " << param.image_path << "/" << TestD.AnnoData[i].image_name << endl;
            continue;
        }

        // detect
        Hypotheses hyp;
        if(param.feature_path.empty()) {
            forest.detect(TestD.AnnoData[i].image_name,originImg,depthImg,hyp);
        }

        #if 0
        // evaluate
        Annotation train;
        hyp.check(TestD.AnnoData[i], param.d_thres, train);
        #endif

        // save detections
        hyp.save_detections( (param.hypotheses_path+"/"+TestD.AnnoData[i].image_name+".txt").c_str());

        #if 0
        // show detections
        hyp.show_detections(originImg, param.d_thres);
        #endif

    }

    gettimeofday(&end, NULL);
    runtime = ( (end.tv_sec - start.tv_sec)*1000 + (end.tv_usec - start.tv_usec)/(1000.0) );
    cout << "Total runtime (L1 test): " << runtime << " msec" << endl;

    return 0;

}
// Code example #2
// main routine
int main(int argc, char* argv[]) {

    //%%%%%%%%%%%%%%%%%%%%%%%% init %%%%%%%%%%%%%%%%%%%%%%%%

    // read arguments
    if(argc<3) {
        cerr << "Usage: ComputeFeatures config.txt override(no(0)/yes(1))" << endl;
        exit(-1);
    }

    // read config file
    StructParam param;
    if(!param.loadConfigFeature(argv[1])) {
        cerr << "Could not parse " << argv[1] << endl;
        exit(-1);
    }

    // read test/anno data (uses same data structure)
    AnnotationData TestD;
    TestD.loadAnnoFile(param.test_file.c_str());

    //if(atoi(argv[2])==2)
    //system(("rm " + param.feature_path + "/*.pgm").c_str());


    // detect hypotheses on all images
    for(int i=0; i<TestD.AnnoData.size(); ++i) {

        // read image
        Mat originImg = imread((param.image_path+"/"+TestD.AnnoData[i].image_name).c_str());
        if(originImg.empty()) {
            cerr << "Could not read image file " << param.image_path << "/" << TestD.AnnoData[i].image_name << endl;
            continue;
        }

        cout << system(("mkdir " + param.feature_path + "/" + TestD.AnnoData[i].image_name).c_str());

        // extract features
        for(int k=0; k<param.scales.size(); ++k) {

            Features Feat;
            string fname(param.feature_path+"/"+TestD.AnnoData[i].image_name+"/"+TestD.AnnoData[i].image_name);
            if( atoi(argv[2])==1 || !Feat.loadFeatures( fname, param.scales[k]) ) {

                Mat scaledImg;
                resize(originImg, scaledImg, Size(int(originImg.cols * param.scales[k] + 0.5), int(originImg.rows * param.scales[k] + 0.5)) );
                Feat.extractFeatureChannels(scaledImg);
                Feat.saveFeatures( fname, param.scales[k]);

#if 0
                // debug!!!!
                Features Feat2;
                namedWindow( "ShowF", CV_WINDOW_AUTOSIZE );
                imshow( "ShowF", Feat.Channels[0] );

                Feat2.loadFeatures( fname, param.scales[k]);

                namedWindow( "ShowF2", CV_WINDOW_AUTOSIZE );
                imshow( "ShowF2", Feat2.Channels[0] );

                cout << scaledImg.rows << " " << scaledImg.cols << " " << scaledImg.depth() << " " << scaledImg.channels() << " " << scaledImg.isContinuous() << endl;
                cout << Feat.Channels[0].rows << " " << Feat.Channels[0].cols << " " << Feat.Channels[0].depth() << " " << Feat.Channels[0].channels() << " " << Feat.Channels[0].isContinuous() << endl;
                cout << Feat2.Channels[0].rows << " " << Feat2.Channels[0].cols << " " << Feat2.Channels[0].depth() << " " << Feat2.Channels[0].channels() << " " << Feat.Channels[0].isContinuous() << endl;


                Mat diff(Size(scaledImg.cols,scaledImg.rows),CV_8UC1);
                cout << diff.rows << " " << diff.cols << " " << diff.depth() << " " << diff.channels() << " " << scaledImg.isContinuous() << endl;

                diff = Feat.Channels[0] - Feat2.Channels[0];

                namedWindow( "ShowDiff", CV_WINDOW_AUTOSIZE );
                imshow( "ShowDiff", diff );
                waitKey(0);
#endif
            }

        }

    }

    return 0;

}
// Code example #3
int detect_L2(int argc, char* argv[]) {

    cout << "Testing L2 started" << endl;
    if(argc<3) {
        cerr << "Usage: HFTrainDetect config.txt 1 [image] [detection]" << endl;
        return -1;
    }

    // timer
    timeval start, end;
    gettimeofday(&start, NULL);
    double runtime=0;

    // read config file
    StructParam param;
    if(!param.loadConfigDetect_L2(argv[1])) {
        cerr << "Could not parse " << argv[1] << endl;
        exit(-1);
    }

    // load first layer forest
    HFForest_L1 forest_L1(&param);
    forest_L1.loadForest(param.treepath_L1);

    // load second layer forest
    HFForest_L2 forest_L2(&param);
    forest_L2.loadForest(param.treepath_L2);

    AnnotationData TestD;
    TestD.loadAnnoFile(param.test_file.c_str());

    // detect hypotheses on all images
    for(unsigned int i=0; i<TestD.AnnoData.size(); ++i) {

        // read image
        string fileName = param.image_path+"/"+TestD.AnnoData[i].image_name;
        string depthFileName = param.depth_image_path+"/"+TestD.AnnoData[i].image_name.substr(0,TestD.AnnoData[i].image_name.size()-4)+"_abs_smooth.png";
        Mat originImg = imread(fileName);
        Mat depthImg  = imread(depthFileName,CV_LOAD_IMAGE_ANYDEPTH);    // depthImg is UINT16

        // calculate leaf id maps using first layer forest
        cout<<"evaluating leafId maps"<<endl;
        vector<vector<vector<Mat> > > leafIdMaps;
        forest_L1.evaluateLeafIdMaps(originImg, depthImg, leafIdMaps);

//        // get the vote maps from the first layer
//        cout<<"evaluating L1 vote maps"<<endl;
//        vector<vector<Mat> > voteMaps_L1;
//        forest_L1.returnVoteMaps(originImg, depthImg, voteMaps_L1);

        // get the vote maps from the second layer
        cout<<"evaluating L2 vote maps"<<endl;
        vector<vector<Mat> > voteMaps_L2;
        forest_L2.returnVoteMaps(leafIdMaps,voteMaps_L2,originImg);

#if 0
        namedWindow("show",CV_WINDOW_AUTOSIZE);
        for(unsigned int aspIdx=0; aspIdx<param.asp_ratios.size(); ++aspIdx){
            for(unsigned int sclIdx=0; sclIdx<param.scales.size(); ++sclIdx){
                Mat show;
                voteMaps_L2[aspIdx][sclIdx].convertTo(show,CV_8U,255*0.05);
                imshow("show",show);
                waitKey(0);
            }
        }
#endif

        Hypotheses hyp;
        forest_L2.detect(hyp,voteMaps_L2);
//        hyp.save_detections((param.hypotheses_path+"/"+TestD.AnnoData[i].image_name+".txt").c_str());
//        hyp.show_detections(originImg,param.d_thres);

//        // pass leafIdMaps to second layer forest for detection
//        cout<<"evaluating combined detection"<<endl;
//        vector<Hypotheses> bigHyp;
//        forest_L2.detect(bigHyp, voteMaps_L1, voteMaps_L2);
//
//        // save detections
//        for(unsigned int hypIdx=0; hypIdx<bigHyp.size(); ++hypIdx){
//            char buffer[5];
//            sprintf(buffer,"%02d",hypIdx);
//            string strBuffer = buffer;
//            bigHyp[hypIdx].save_detections( (param.hypotheses_path+"/lambda"+strBuffer+"/"+TestD.AnnoData[i].image_name+".txt").c_str());
//        }
    }

    gettimeofday(&end, NULL);
    runtime = ( (end.tv_sec - start.tv_sec)*1000 + (end.tv_usec - start.tv_usec)/(1000.0) );
    cout << "Total runtime (L2 test): " << runtime << " msec" << endl;

    return 0;

}