Example #1
bool consoleInput (const std::vector<std::string>& _args,
                   evg::SrcVideo& _inputVideo, std::string& _outputVideoPath,
                   DetectorType& _detectorType, const DetectorType defaultDetectorType,
                   TrackerType& _trackerType, const TrackerType defaultTrackerType,
                   double& _redetectionTime,
                   bool& _restart)
{
    try {
        ValuesConstraint<string> allowedDetectorVals( makeAllowedDetectorVals() );
        ValuesConstraint<string> allowedTrackerVals( makeAllowedTrackerVals() );

        // make parsing construction
        //   4th arg 'false' disables help. Help results in exit in TCLAP
        CmdLine cmd("Set/reset all parameters", ' ', "0", false);
        ValueArg<string> cmdDetector ("d", "detector", "number or name of detector.",
                                           false, "default", &allowedDetectorVals, cmd);
        ValueArg<string> cmdTracker  ("t", "tracker", "number or name of tracker",
                                           false, "default", &allowedTrackerVals, cmd);
        ValueArg<double> cmdPeriod   ("p", "period", "redetection period, in range [0; 4]",
                                           false, 2., "double", cmd);
        ValueArg<string> cmdInput    ("i", "input", "input video file path",
                                           false, "", "string", cmd);
        SwitchArg cmdCamera          ("c", "camera", "indicate when using camera", cmd, false);
        ValueArg<string> cmdOutput   ("o", "output", "output video path",
                                           false, "", "string", cmd);
        SwitchArg cmdRestart         ("r", "restart", "restart the video", cmd, false);

        // parse
        //   for some reason TCLAP does not have the const qualifier
        std::vector<std::string> args = _args;
        cmd.parse(args);


        // parse restart
        if (cmdRestart.isSet())
            _restart = true;
        else _restart = false;

        // parse inputVideo
        // if input method is set, we restart
        if (cmdCamera.isSet() || cmdInput.isSet())
            _restart = true;
        // set the new input methd
        if (cmdCamera.isSet())
            _inputVideo = evg::SrcVideo(evg::SrcVideo::CAMERA, "");
        else if (cmdInput.isSet())
            _inputVideo = evg::SrcVideo(evg::SrcVideo::FILE, cmdInput.getValue());
        // no way to set mutually exclusive but non-required arguments => check manually
        if (cmdCamera.isSet() && cmdInput.isSet())
        {
            cerr << "Input from both camera and file cannot be specified." << endl;
            return 0;
        }

        // parse outputVideoPath
        if (cmdOutput.isSet())
            _outputVideoPath = cmdOutput.getValue();

        // parse detectorType
        if (cmdDetector.isSet())
        {
            if (cmdDetector.getValue() == "default")
                _detectorType = defaultDetectorType;
            else if (cmdDetector.getValue() == "1" || cmdDetector.getValue() == "opencv")
                _detectorType = OPENCV_DETECTOR;
            else if (cmdDetector.getValue() == "2" || cmdDetector.getValue() == "kyle")
                _detectorType = KYLE_DETECTOR;
            else assert(0);
        }

        // parse trackerType
        if (cmdTracker.isSet())
        {
            if (cmdTracker.getValue() == "default")
                _trackerType = defaultTrackerType;
            else if (cmdTracker.getValue() == "1" || cmdTracker.getValue() == "optical_flow")
                _trackerType = OPTICAL_FLOW_TRACKER;
            else if (cmdTracker.getValue() == "2" || cmdTracker.getValue() == "camshift")
                _trackerType = CAMSHIFT_TRACKER;
            else if (cmdTracker.getValue() == "3" || cmdTracker.getValue() == "compressive")
                _trackerType = COMPRESSIVE_TRACKER;
            else if (cmdTracker.getValue() == "4" || cmdTracker.getValue() == "kyle")
                _trackerType = KYLE_TRACKER;
            else assert(0);
        }

        // parse redetection time
        if (cmdPeriod.isSet())
            _redetectionTime = cmdPeriod.getValue();

        return 1;
    } catch (...) {
        std::cerr << "consoleInput(): exception caught" << std::endl;
        return 0;
    }
}
Example #2
/// Parses the program's initial command line. Unlike consoleInput(), every
/// output parameter is always assigned: options that were not given fall
/// back to their documented defaults, and exactly one of -c / -i is
/// required (enforced via TCLAP's xorAdd).
///
/// @param argc, argv           the process command line
/// @param _inputVideo          [out] video source (camera or file; required)
/// @param _outputVideoPath     [out] output video path ("" if -o absent)
/// @param _detectorType        [out] detector (default if -d absent)
/// @param defaultDetectorType  value used when -d is absent or "default"
/// @param _trackerType         [out] tracker (default if -t absent)
/// @param defaultTrackerType   value used when -t is absent or "default"
/// @param _redetectionTime     [out] redetection period (2.0 if -p absent)
/// @return true on success, false on exception
bool consoleFirstInput (int argc, char *argv[],
                        evg::SrcVideo& _inputVideo, std::string& _outputVideoPath,
                        DetectorType& _detectorType, const DetectorType defaultDetectorType,
                        TrackerType& _trackerType, const TrackerType defaultTrackerType,
                        double& _redetectionTime)
{
    try {
        ValuesConstraint<string> allowedDetectorVals( makeAllowedDetectorVals() );
        ValuesConstraint<string> allowedTrackerVals( makeAllowedTrackerVals() );

        // make parsing construction
        CmdLine cmd("Set/reset all parameters", ' ', "0");
        ValueArg<string> cmdDetector ("d", "detector", "number or name of detector.",
                                           false, "default", &allowedDetectorVals, cmd);
        ValueArg<string> cmdTracker  ("t", "tracker", "number or name of tracker",
                                           false, "default", &allowedTrackerVals, cmd);
        ValueArg<double> cmdPeriod   ("p", "period", "redetection period, in range [0; 4]",
                                           false, 2., "double", cmd);
        // input source: exactly one of -i / -c, enforced by xorAdd below
        ValueArg<string> cmdInput    ("i", "input", "input video file path",
                                           false, "", "string");
        SwitchArg cmdCamera ("c", "camera", "indicate when using camera", false);
        cmd.xorAdd( cmdInput, cmdCamera );
        ValueArg<string> cmdOutput  ("o", "output", "output video path",
                                           false, "", "string", cmd);

        // parse
        cmd.parse(argc, argv);


        // parse inputVideo
        if (cmdCamera.isSet())
            _inputVideo = evg::SrcVideo(evg::SrcVideo::CAMERA, "");
        else if (cmdInput.isSet())
            _inputVideo = evg::SrcVideo(evg::SrcVideo::FILE, cmdInput.getValue());
        else
            assert(0);   // unreachable: xorAdd makes one of -i / -c mandatory

        // parse outputVideoPath
        _outputVideoPath = cmdOutput.getValue();

        // parse detectorType
        if (cmdDetector.isSet())
        {
            if (cmdDetector.getValue() == "default")
                _detectorType = defaultDetectorType;
            else if (cmdDetector.getValue() == "1" || cmdDetector.getValue() == "opencv")
                _detectorType = OPENCV_DETECTOR;
            else if (cmdDetector.getValue() == "2" || cmdDetector.getValue() == "kyle")
                _detectorType = KYLE_DETECTOR;
            else assert(0);   // unreachable: values restricted by allowedDetectorVals
        }
        else
            _detectorType = defaultDetectorType;

        // parse trackerType
        if (cmdTracker.isSet())
        {
            if (cmdTracker.getValue() == "default")
                _trackerType = defaultTrackerType;
            else if (cmdTracker.getValue() == "1" || cmdTracker.getValue() == "optical_flow")
                _trackerType = OPTICAL_FLOW_TRACKER;
            else if (cmdTracker.getValue() == "2" || cmdTracker.getValue() == "camshift")
                _trackerType = CAMSHIFT_TRACKER;
            else if (cmdTracker.getValue() == "3" || cmdTracker.getValue() == "compressive")
                _trackerType = COMPRESSIVE_TRACKER;
            else if (cmdTracker.getValue() == "4" || cmdTracker.getValue() == "kyle")
                _trackerType = KYLE_TRACKER;
            else assert(0);   // unreachable: values restricted by allowedTrackerVals
        }
        else
            _trackerType = defaultTrackerType;


        // parse redetection time
        if (cmdPeriod.isSet())
            _redetectionTime = cmdPeriod.getValue();
        else
            _redetectionTime = 2.;


        return true;
    } catch (...) {
        std::cerr << "consoleFirstInput(): exception caught" << std::endl;
        return false;
    }
}
/// Parses command-line options for the keypoint detector, then runs
/// detection on the single image file given on the command line.
/// Writes keypoints either to stdout or to the file given with -o,
/// in the format chosen with --format.
void parseOptions ( int argc, char** argv )
{
    try
    {

        CmdLine cmd ( "keypoints", ' ', kVersion );

        MyOutput my;
        cmd.setOutput ( &my );

        SwitchArg aArgFullScale ( "","fullscale", "Uses full scale image to detect keypoints    (default:false)\n", false );
        // SURF has a better performance than the other descriptors, use it by default, if it is enabled
        ValueArg<int> aArgSurfScoreThreshold ( "","surfscore", "Detection score threshold    (default : 1000)\n", false, 1000, "int" );
        ValueArg<int> aArgSieve1Width ( "","sievewidth", "Interest point sieve: Number of buckets on width    (default : 10)", false, 10, "int" );
        ValueArg<int> aArgSieve1Height ( "","sieveheight",  "Interest point sieve : Number of buckets on height    (default : 10)", false, 10, "int" );
        ValueArg<int> aArgSieve1Size ( "","sievesize",	"Interest point sieve : Max points per bucket    (default : 10)\n", false, 10, "int" );
        // help text matches the accepted values checked below
        ValueArg<std::string> aArgOutputFormat ( "","format", "Output format (text, autopano-sift-xml, descperf), default text\n", false, "text", "string" );
        ValueArg<std::string> aArgOutputFile ( "o","output", "Output file. If not specified, print to standard out\n", false, "", "string" );
        SwitchArg aArgInterestPoints ( "","interestpoints", "output only the interest points and the scale (default:false)\n", false );
        ValueArg<std::string> aArgFixedInterestPoint ( "","ip", "Compute descriptor at x,y,scale,ori \n", false, "", "string" );

        cmd.add ( aArgSurfScoreThreshold );
        cmd.add ( aArgFullScale );
        cmd.add ( aArgSieve1Width );
        cmd.add ( aArgSieve1Height );
        cmd.add ( aArgSieve1Size );
        cmd.add ( aArgOutputFormat );
        cmd.add ( aArgOutputFile );
        cmd.add ( aArgInterestPoints );
        cmd.add ( aArgFixedInterestPoint );

        /*
        	SwitchArg aArgTest("t","test", "Enables test mode\n", false);
        	cmd.add( aArgTest );
        */

        UnlabeledMultiArg<string> aArgFiles ( "fileName", "Image files", true, "string" );
        cmd.add ( aArgFiles );

        cmd.parse ( argc,argv );

        //
        // Set variables
        //
        vector<string> aFiles = aArgFiles.getValue();
        if ( aFiles.size() != 1 )
        {
            // report why we are bailing out instead of exiting silently
            std::cerr << "Expected exactly one image file, got " << aFiles.size() << std::endl;
            exit ( 1 );
        }

        double surfScoreThreshold=1000;
        if ( aArgSurfScoreThreshold.isSet() )
        {
            surfScoreThreshold = ( aArgSurfScoreThreshold.getValue() );
        }

        bool downscale = true;
        if ( aArgFullScale.isSet() )
        {
            downscale = false;
        }

        int sieveWidth = 10;
        if ( aArgSieve1Width.isSet() )
        {
            sieveWidth = aArgSieve1Width.getValue();
        }
        int sieveHeight = 10;
        if ( aArgSieve1Height.isSet() )
        {
            sieveHeight = aArgSieve1Height.getValue();
        }
        int sieveSize = 10;
        if ( aArgSieve1Size.isSet() )
        {
            sieveSize = aArgSieve1Size.getValue();
        }

        bool onlyInterestPoints = false;
        if ( aArgInterestPoints.isSet() )
        {
            onlyInterestPoints = true;
        }

        // output stream: a heap-allocated file stream (freed below), or cout
        std::ostream* outstream;
        if ( aArgOutputFile.isSet() )
        {
            outstream = new std::ofstream(aArgOutputFile.getValue().c_str());
        }
        else
        {
            outstream = & std::cout;
        }

        KeypointWriter* writer = 0;
        std::string outputformat = "text";
        if ( aArgOutputFormat.isSet() )
        {
            outputformat = aArgOutputFormat.getValue();
        }
        if (outputformat == "text")
        {
            writer = new SIFTFormatWriter(*outstream);
        }
        else if (outputformat == "autopano-sift-xml")
        {
            writer = new AutopanoSIFTWriter(*outstream);
        }
        else if (outputformat == "descperf")
        {
            writer = new DescPerfFormatWriter(*outstream);
        }
        else
        {
            std::cerr << "Unknown output format, valid values are text, autopano-sift-xml, descperf" << std::endl;
            exit(1);
        }


        KeyPointPtr preKPPtr;
        if ( aArgFixedInterestPoint.isSet() )
        {
            preKPPtr = KeyPointPtr(new KeyPoint());
            // sentinels to detect fields sscanf did not fill in
            preKPPtr->_x = -10001;
            preKPPtr->_ori = -10001;
            int nf = sscanf(aArgFixedInterestPoint.getValue().c_str(), "%lf:%lf:%lf:%lf",
                            &(preKPPtr->_x), &(preKPPtr->_y), &(preKPPtr->_scale), &(preKPPtr->_ori));
            std::cerr << "passed orientation: " << preKPPtr->_ori << std::endl;
            if (nf < 3)
            {
                std::cerr << "Invalid value for --ip option, expected --ip x:y:scale:ori" << std::endl;
                exit(1);
            }
        }

        DetectKeypoints ( aFiles[0], downscale, surfScoreThreshold, preKPPtr, onlyInterestPoints, sieveWidth, sieveHeight, sieveSize, *writer );

        // the writer references *outstream, so destroy it first;
        // previously 'writer' was leaked on every call
        delete writer;
        if ( aArgOutputFile.isSet() )
        {
            delete outstream;
        }

    }
    catch ( ArgException& e )
    {
        cout << "ERROR: " << e.error() << " " << e.argId() << endl;
    }
}
/// Matches a pair of images with the chosen feature type (sift/surf/orb/
/// brisk), filters matches by Lowe's ratio test against the threshold,
/// writes the surviving matches to the output file, and optionally shows
/// them on screen. Returns 0 on success, -1 on an invalid output path.
int main(int argc, const char * argv[])
{
    // parse input
    CmdLine cmd ("match a pair of images using specified features");
    
    vector<string> featureTypes;
    featureTypes.push_back("sift");
    featureTypes.push_back("surf");
    featureTypes.push_back("orb");
    featureTypes.push_back("brisk");
    ValuesConstraint<string> cmdFeatureTypes( featureTypes );
    ValueArg<string> cmdFeature("f", "feature", "feature type", true, "", &cmdFeatureTypes, cmd);
    
    ValueArg<string> cmd1st ("1", "1st", "1st image file path", true, "", "string", cmd);
    ValueArg<string> cmd2nd ("2", "2nd", "2nd image file path", true, "", "string", cmd);
    ValueArg<float> cmdThresh ("t", "threshold", "threshold for matching, 0-1, higher gives more matches", true, 3, "float", cmd);
    ValueArg<string> cmdOutM  ("o", "outmat", "file path for matches", false, "/dev/null", "string", cmd);
    SwitchArg cmdDisableImshow ("", "disable_image", "don't show image", cmd);
    MultiSwitchArg cmdVerbose ("v", "", "level of verbosity of output", cmd);
    
    cmd.parse(argc, argv);
    string           featureType    = cmdFeature.getValue();
    float            threshold      = cmdThresh.getValue();
    string           imageName1     = cmd1st.getValue();
    string           imageName2     = cmd2nd.getValue();
    string           outMName       = cmdOutM.getValue();
    bool             disableImshow  = cmdDisableImshow.getValue();
    int              verbose        = cmdVerbose.getValue();
    
    // file for output
    path outMPath = absolute(path(outMName));
    if (! exists(outMPath.parent_path()))
    {
        cerr << "parent path " << outMPath.parent_path() << " doesn't exist." << endl;
        return -1;
    }
    if (is_directory(outMPath))
    {
        cerr << "writeSimpleMatches: Need a filename, not a directory: " << outMPath << endl;
        return -1;
    }
    
    // load images
    Mat im1, im2;
    if (!evg::loadImage(imageName1, im1)) return 0;
    if (!evg::loadImage(imageName2, im2)) return 0;
    
    // setup detectors
    Ptr<FeatureDetector> detector = newFeatureDetector (featureType);
    Ptr<DescriptorExtractor> extractor = newDescriptorExtractor (featureType);
    Ptr<DescriptorMatcher> matcher = newMatcher (featureType, verbose);
    
    // match
    vector<KeyPoint> keypoints1, keypoints2;
    Mat descriptors1, descriptors2;
    vector< vector<DMatch> > matchesPairs;
    
    detector->detect (im1, keypoints1);
    detector->detect (im2, keypoints2);
    extractor->compute (im1, keypoints1, descriptors1);
    extractor->compute (im2, keypoints2, descriptors2);
    matcher->knnMatch (descriptors1, descriptors2, matchesPairs, 2);

    // filter based on relative distance to the two closest (Lowe's ratio)
    vector<DMatch> matches;
    matches.reserve (matchesPairs.size());
    for (size_t i = 0; i != matchesPairs.size(); ++i)
    {
        // knnMatch may return fewer than 2 neighbors for a query;
        // indexing [1] unconditionally would be out of bounds
        if (matchesPairs[i].size() < 2) continue;
        float ratio = matchesPairs[i][0].distance / matchesPairs[i][1].distance;
        if (ratio < threshold)
        {
            if (verbose >= 2) cout << ratio << " ";
            matchesPairs[i][0].distance = ratio;
            matches.push_back (matchesPairs[i][0]);
        }
    }
    if (verbose >= 2) cout << endl;

    
    // write results
    evg::writeSimpleMatches (outMPath.string(), imageName1, imageName2, keypoints1, keypoints2, matches);
    
    if (!disableImshow)
    {
        Mat im1gray, im2gray;
        cvtColor(im1, im1gray, CV_RGB2GRAY);
        cvtColor(im2, im2gray, CV_RGB2GRAY);
        // scale both images so that the pair fits a 1440-px-wide screen
        float factor = float(1440) / im1gray.cols / 2;
        vector<KeyPoint> keypoints1im = keypoints1, keypoints2im = keypoints2;
        for (size_t i = 0; i != keypoints1im.size(); ++i)
        {
            keypoints1im[i].pt.x = keypoints1im[i].pt.x * factor;
            keypoints1im[i].pt.y = keypoints1im[i].pt.y * factor;
        }
        for (size_t i = 0; i != keypoints2im.size(); ++i)
        {
            keypoints2im[i].pt.x = keypoints2im[i].pt.x * factor;
            keypoints2im[i].pt.y = keypoints2im[i].pt.y * factor;
        }
        
        resize(im1gray, im1gray, Size(), factor, factor);
        resize(im2gray, im2gray, Size(), factor, factor);
        Mat imgMatches;
        drawMatches (im1gray, keypoints1im, im2gray, keypoints2im, matches, imgMatches,
                     Scalar::all(-1), Scalar::all(-1),
                     vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
        imshow( "matches", imgMatches );
        if (waitKey(0) == 27) return 0;   // Esc closes the window
    }
    
    return 0;
}
/**
 * Estimates the pose of an object point cloud within a scene point cloud
 * via MCMC (delegated to estimatePose). Reads both clouds from the files
 * given on the command line, optionally computes surface normals and
 * subsamples the scene, runs the chains, and writes the best
 * transformation and/or the aligned object cloud to the requested files.
 *
 * @return 0 on success, EXIT_FAILURE on any exception.
 */
int mcmc_pose_estimation(int argc, char ** argv)
{
  try {
    
    using namespace nuklei;
    using namespace TCLAP;
    
    CmdLine cmd("");
    
    UnlabeledValueArg<std::string> objectFileArg
    ("object_evidence",
     "Object file.",
     true, "", "filename", cmd);
    
    UnlabeledValueArg<std::string> sceneFileArg
    ("scene_evidence",
     "Scene file.",
     true, "", "filename", cmd);
    
    ValueArg<std::string> alignedObjectEvidenceFileArg
    ("", "aligned",
     "Transformed object evidence, matching object pose.",
     false, "", "filename", cmd);
    
    ValueArg<int> nArg
    ("n", "n_model_points",
     "Number of particle supporting the object model.",
     false, 0, "int", cmd);
    
    ValueArg<double> locHArg
    ("l", "loc_h",
     "Location kernel width.",
     false, 0, "float", cmd);
    
    ValueArg<double> oriHArg
    ("o", "ori_h",
     "Orientation kernel width (in radians).",
     false, 0.2, "float", cmd);
    
    ValueArg<int> nChainsArg
    ("c", "n_chains",
     "Number of MCMC chains.",
     false, 0, "int", cmd);
    
    ValueArg<std::string> bestTransfoArg
    ("", "best_transfo",
     "File to write the most likely transformation to.",
     false, "", "filename", cmd);
    
    SwitchArg computeNormalsArg
    ("", "normals",
     "Compute a normal vector for all input points. Makes pose estimation more robust.", cmd);
    
    SwitchArg lightArg
    ("", "light",
     "Limit the scene model to 10000 points, for speed.", cmd);

    SwitchArg accurateScoreArg
    ("s", "accurate_score",
     "Recompute the matching score using all input points (instead of using N points as given by -n N).", cmd);
    
    cmd.parse( argc, argv );
    
    // ------------- //
    // Read-in data: //
    // ------------- //
    
    KernelCollection objectEvidence, sceneEvidence;
    readObservations(objectFileArg.getValue(), objectEvidence);
    readObservations(sceneFileArg.getValue(), sceneEvidence);
    
    if (objectEvidence.size() == 0 || sceneEvidence.size() == 0)
      NUKLEI_THROW("Empty input cloud.");
    
      // With --normals, enrich both clouds with surface normals;
      // otherwise warn that plain R3 points degrade the estimate.
      if (computeNormalsArg.getValue())
      {
        std::cout << "Computing normals for object model..." << std::endl;
        objectEvidence.buildNeighborSearchTree();
        objectEvidence.computeSurfaceNormals();
        std::cout << "Computing normals for object model... done." << std::endl;
      }
      else
        std::cout << "Warning: object model is an R3 cloud. " <<
        "Pose estimation will be suboptimal. Use --normals to fix this." <<
        std::endl;

    if (computeNormalsArg.getValue())
      {
        std::cout << "Computing normals for scene model..." << std::endl;
        sceneEvidence.buildNeighborSearchTree();
        sceneEvidence.computeSurfaceNormals();
        std::cout << "Computing normals for scene model... done." << std::endl;
      }
      else
        std::cout << "Warning: scene model is an R3 cloud. " <<
        "Pose estimation will be suboptimal. Use --normals to fix this." <<
        std::endl;
    
    if (objectEvidence.front().polyType() != sceneEvidence.front().polyType())
      NUKLEI_THROW("Input point clouds must be defined on the same domain.");
    
    
    // With --light, randomly subsample the scene down to 10000 points.
    if (lightArg.getValue() && sceneEvidence.size() > 10000)
    {
      KernelCollection tmp;
      for (KernelCollection::sample_iterator i = sceneEvidence.sampleBegin(10000);
           i != i.end(); i++)
      {
        tmp.add(*i);
      }
      sceneEvidence = tmp;
    }

    if (sceneEvidence.size() > 10000)
      std::cout << "Warning: Scene model has more than 10000 points. "
      "To keep computation time low, keep the model under 10000 points. "
      "Use --light to fix this." << std::endl;
    
      
    // Kernel widths, for position and orientation:
    // if -l is absent (or non-positive), derive the location width from
    // the object cloud's moments.
    const double locH = (locHArg.getValue()<=0
                         ?
                         objectEvidence.moments()->getLocH()/10
                         :
                         locHArg.getValue()
                         );
    const double oriH = oriHArg.getValue(); // in radians
    
    // For best performances, choose a multiple of
    // the number of logical cores.
    const int nChains = (nChainsArg.getValue()<=0
                         ?
                         8
                         :
                         nChainsArg.getValue()
                         );
    
    // Number of object-model points used per inference loop; capped at
    // 1000 unless forced with -n.
    int n = -1;
    
    if (nArg.getValue() <= 0)
    {
      n = objectEvidence.size();
      if (n > 1000)
      {
        std::cout << "Warning: Object model has more than 1000 points. "
        "To keep computational cost low, only 1000 points will be used at each "
        "inference loop. "
        "Use -n to force a large number of model points." << std::endl;
        n = 1000;
      }
    }
    else
      n = nArg.getValue();
    
    // ------------------------------- //
    // Prepare density for evaluation: //
    // ------------------------------- //
    
    sceneEvidence.setKernelLocH(locH);
    sceneEvidence.setKernelOriH(oriH);
    objectEvidence.setKernelLocH(locH);
    objectEvidence.setKernelOriH(oriH);
    
    // Statistics and the kd-tree must be in place before evaluationAt /
    // estimatePose are called below.
    objectEvidence.computeKernelStatistics();
    sceneEvidence.computeKernelStatistics();
    sceneEvidence.buildKdTree();
    
    kernel::se3 t = estimatePose(objectEvidence, sceneEvidence, nChains, n);
    
    // With -s, recompute the score of the winning pose over ALL object
    // points, accumulating the scene density at each transformed point.
    if (accurateScoreArg.getValue())
    {
      t.setWeight(0);
      for (KernelCollection::const_iterator i = objectEvidence.begin();
           i != objectEvidence.end(); ++i)
      {
        weight_t w = 0;
        // NOTE(review): WEIGHTED_SUM_EVIDENCE_EVAL appears to be a
        // compile-time flag selecting the evaluation strategy — confirm.
        if (WEIGHTED_SUM_EVIDENCE_EVAL)
        {
          w = sceneEvidence.evaluationAt(*i->polyTransformedWith(t),
                                         KernelCollection::WEIGHTED_SUM_EVAL);
        }
        else
        {
          w = sceneEvidence.evaluationAt(*i->polyTransformedWith(t), KernelCollection::MAX_EVAL);
        }
        t.setWeight(t.getWeight() + w);
      }
    }
    
    if (!bestTransfoArg.getValue().empty())
    {
      writeSingleObservation(bestTransfoArg.getValue(), t);
    }
    
    if (!alignedObjectEvidenceFileArg.getValue().empty())
    {
      objectEvidence.transformWith(t);
      writeObservations(alignedObjectEvidenceFileArg.getValue(), objectEvidence);
    }
    
    return 0;
  }
  catch (std::exception &e) {
    std::cerr << "Exception caught: ";
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
  }
  catch (...) {
    std::cerr << "Caught unknown exception." << std::endl;
    return EXIT_FAILURE;
  }
  
}
/**
 * main procedure
 *
 * @param argc number of command-line arguments
 * @param argv argument vector
 *
 * @return 0 (argument errors are reported to stderr)
 */
/// Applies a 2D quasi-affine transform (ax+by+e, cx+dy+f)/omega, read from
/// a transform file, to an input image, using the mapping method selected
/// on the command line. With --composition, also applies the inverse and
/// reports the PSNR against the original.
int main ( int argc, char** argv )
{

  try
    {
      //CmdLine Parser generator
      
      
      CmdLine cmd ( "Quasi Affine Transform in dimension 2", ' ', "0.3" );
      vector<Arg *> xorlist;
   
      //Option (not required): exactly one mapping method, via xorAdd
      SwitchArg backwardSwitch ( "l","linearbackward","Bilinear Backward Mapping", false );
      xorlist.push_back(&backwardSwitch);
      SwitchArg backwardNNSwitch ( "n","NNbackward","Nearest Neighbor Backward Mapping", false );
      xorlist.push_back(&backwardNNSwitch);
      SwitchArg naiveSwitch ( "","naive","Naive BoundingRect method",false );
      xorlist.push_back(&naiveSwitch);
      SwitchArg periodicitySwitch ( "p","periodicity","Use paving periodicity", false );
      xorlist.push_back(&periodicitySwitch);
      cmd.xorAdd ( xorlist);
      
      SwitchArg nomultiplySwitch ( "m","no_multiplication","No multiplications in the paving computation", false );
      cmd.add ( nomultiplySwitch );
      SwitchArg fakeColorSwitch ( "f","fake_color","Output fake colors to illustrate pavings (non contracting AQA only)", false );
      cmd.add ( fakeColorSwitch );
      SwitchArg compositionSwitch ( "","composition","Composition test: f.f^{-1}", false );
      cmd.add ( compositionSwitch );
      ValueArg<string> transformFile ( "t","transform","The transform file name (this file should contain in this order : omega, a, b, c, d, e, f, separated with spaces, where the quasi-affine transform is : (ax + by + e, cx + dy + f) / omega)",true,"","file name" );
      cmd.add ( transformFile );
      ValueArg<string> outFile ( "o","output","The output image file name",true,"","file name" );
      cmd.add ( outFile );
      ValueArg<string> inFile ( "i","input","The input image file name",true,"","file name" );
      cmd.add ( inFile );

      // Parse the argv array.
      cmd.parse ( argc, argv );

      // Get the value parsed by each arg.
      InterpolationType interp = NO_BM;
      if ( backwardSwitch.getValue() )
        interp = LINEAR;
      else
        if ( backwardNNSwitch.getValue() )
          interp = NN;

      bool useBoundingRect = naiveSwitch.getValue();
      bool noMultiply = nomultiplySwitch.getValue();
      bool usePeriodicity = periodicitySwitch.getValue();
      bool fakeColor = fakeColorSwitch.getValue();
      bool composition = compositionSwitch.getValue();


      cout <<"Cmd Line Test: [ ";

      cout << "Interpolation type : " << interp <<", ";
      cout << "BoundingRect : ";
      if ( useBoundingRect )
        cout << "Y, ";
      else
        cout << "N, ";

      cout << "noMultiply : ";
      if ( noMultiply )
        cout << "Y, ";
      else
        cout << "N, ";

      cout << "Periodicity : ";
      if ( usePeriodicity )
        cout << "Y";
      else
        cout << "N";
      cout << " ]"<<endl;

      cout << "Fake Colors: ";
      if ( fakeColor )
        cout << "Y";
      else
        cout << "N";
      cout << endl;   // (a stray " ]" with no matching "[" used to be printed here)


      // Read the transform parameters: omega, a, b, c, d, e, f.
      // Initialize them and verify the stream, since a missing or
      // malformed file previously left them uninitialized (UB).
      int o = 0, a = 0, b = 0, c = 0, d = 0, e = 0, f = 0;
      fstream transform ( transformFile.getValue().c_str(), fstream::in );
      transform >> o >> a >> b >> c >> d >> e >> f;
      if ( !transform )
        {
          std::cerr << "error: could not read 7 integers (omega a b c d e f) from '"
                    << transformFile.getValue() << "'" << std::endl;
          return 1;
        }
      transform.close();

      Image initialImage ( inFile.getValue() );

      QAT qat ( Matrix2x2 ( a, b, c, d ), o, Vector2D ( e, f ) );

      // second, identical transform used for the inverse in composition mode
      QAT qat2 ( Matrix2x2 ( a, b, c, d ), o, Vector2D ( e, f ) );

      if ( composition )
        {
          Image finalImage = qat.applyToImage ( initialImage, interp, useBoundingRect, usePeriodicity, noMultiply, fakeColor, false );
          cout << "Inverse computation..."<<endl;
          Image inverse = qat2.applyToImage ( finalImage, interp, useBoundingRect, usePeriodicity, noMultiply, fakeColor, true );
          inverse.write ( outFile.getValue() );

          double db = psnr ( initialImage, inverse );
          cout << "PSNR = "<<db<<" db"<<endl;
        }
      else
        {
          Image finalImage = qat.applyToImage ( initialImage, interp , useBoundingRect, usePeriodicity, noMultiply, fakeColor, false );
          finalImage.write ( outFile.getValue() );
        }

    }
  catch ( ArgException &e )  // catch any exceptions
    {
      std::cerr << "error: " << e.error() << " for arg " << e.argId() << std::endl;
    }

  return 0;
}