Example #1
int main(int argc, char *argv[])
{
    // Info: reads only the _first_ 3D model in the NVM file!

    TCLAP::CmdLine cmd("LINE3D++");

    TCLAP::ValueArg<std::string> inputArg("i", "input_folder", "folder containing the images (if not specified, path in .nvm file is expected to be correct)", false, "", "string");
    cmd.add(inputArg);

    TCLAP::ValueArg<std::string> nvmArg("m", "nvm_file", "full path to the VisualSfM result file (.nvm)", true, ".", "string");
    cmd.add(nvmArg);

    TCLAP::ValueArg<std::string> outputArg("o", "output_folder", "folder where result and temporary files are stored (if not specified --> input_folder+'/Line3D++/')", false, "", "string");
    cmd.add(outputArg);

    TCLAP::ValueArg<int> scaleArg("w", "max_image_width", "scale image down to fixed max width for line segment detection", false, L3D_DEF_MAX_IMG_WIDTH, "int");
    cmd.add(scaleArg);

    TCLAP::ValueArg<int> neighborArg("n", "num_matching_neighbors", "number of neighbors for matching", false, L3D_DEF_MATCHING_NEIGHBORS, "int");
    cmd.add(neighborArg);

    TCLAP::ValueArg<float> sigma_A_Arg("a", "sigma_a", "angle regularizer", false, L3D_DEF_SCORING_ANG_REGULARIZER, "float");
    cmd.add(sigma_A_Arg);

    TCLAP::ValueArg<float> sigma_P_Arg("p", "sigma_p", "position regularizer (if negative: fixed sigma_p in world-coordinates)", false, L3D_DEF_SCORING_POS_REGULARIZER, "float");
    cmd.add(sigma_P_Arg);

    TCLAP::ValueArg<float> epipolarArg("e", "min_epipolar_overlap", "minimum epipolar overlap for matching", false, L3D_DEF_EPIPOLAR_OVERLAP, "float");
    cmd.add(epipolarArg);

    TCLAP::ValueArg<int> knnArg("k", "knn_matches", "number of matches to be kept (<= 0 --> use all that fulfill overlap)", false, L3D_DEF_KNN, "int");
    cmd.add(knnArg);

    TCLAP::ValueArg<int> segNumArg("y", "num_segments_per_image", "maximum number of 2D segments per image (longest)", false, L3D_DEF_MAX_NUM_SEGMENTS, "int");
    cmd.add(segNumArg);

    TCLAP::ValueArg<int> visibilityArg("v", "visibility_t", "minimum number of cameras to see a valid 3D line", false, L3D_DEF_MIN_VISIBILITY_T, "int");
    cmd.add(visibilityArg);

    TCLAP::ValueArg<bool> diffusionArg("d", "diffusion", "perform Replicator Dynamics Diffusion before clustering", false, L3D_DEF_PERFORM_RDD, "bool");
    cmd.add(diffusionArg);

    TCLAP::ValueArg<bool> loadArg("l", "load_and_store_flag", "load/store segments (recommended for big images)", false, L3D_DEF_LOAD_AND_STORE_SEGMENTS, "bool");
    cmd.add(loadArg);

    TCLAP::ValueArg<float> collinArg("r", "collinearity_t", "threshold for collinearity", false, L3D_DEF_COLLINEARITY_T, "float");
    cmd.add(collinArg);

    TCLAP::ValueArg<bool> cudaArg("g", "use_cuda", "use the GPU (CUDA)", false, true, "bool");
    cmd.add(cudaArg);

    TCLAP::ValueArg<bool> ceresArg("c", "use_ceres", "use CERES (for 3D line optimization)", false, L3D_DEF_USE_CERES, "bool");
    cmd.add(ceresArg);

    TCLAP::ValueArg<float> minBaselineArg("x", "min_image_baseline", "minimum baseline between matching images (world space)", false, L3D_DEF_MIN_BASELINE, "float");
    cmd.add(minBaselineArg);

    TCLAP::ValueArg<float> constRegDepthArg("z", "const_reg_depth", "use a constant regularization depth (only when sigma_p is metric!)", false, -1.0f, "float");
    cmd.add(constRegDepthArg);

    // read arguments
    cmd.parse(argc,argv);
    std::string inputFolder = inputArg.getValue();
    std::string nvmFile = nvmArg.getValue();

    // check if NVM file exists
    boost::filesystem::path nvm(nvmFile);
    if(!boost::filesystem::exists(nvm))
    {
        std::cerr << "NVM file " << nvmFile << " does not exist!" << std::endl;
        return -1;
    }

    bool use_full_image_path = false;
    if(inputFolder.length() == 0)
    {
        // parse input folder from .nvm file
        use_full_image_path = true;
        inputFolder = nvm.parent_path().string();
    }

    std::string outputFolder = outputArg.getValue();
    if(outputFolder.length() == 0)
        outputFolder = inputFolder+"/Line3D++/";

    int maxWidth = scaleArg.getValue();
    unsigned int neighbors = std::max(neighborArg.getValue(),2);
    bool diffusion = diffusionArg.getValue();
    bool loadAndStore = loadArg.getValue();
    float collinearity = collinArg.getValue();
    bool useGPU = cudaArg.getValue();
    bool useCERES = ceresArg.getValue();
    float epipolarOverlap = fmin(fabs(epipolarArg.getValue()),0.99f);
    float sigmaA = fabs(sigma_A_Arg.getValue());
    float sigmaP = sigma_P_Arg.getValue();
    float minBaseline = fabs(minBaselineArg.getValue());
    int kNN = knnArg.getValue();
    unsigned int maxNumSegments = segNumArg.getValue();
    unsigned int visibility_t = visibilityArg.getValue();
    float constRegDepth = constRegDepthArg.getValue();

    // create output directory
    boost::filesystem::path dir(outputFolder);
    boost::filesystem::create_directory(dir);

    // create Line3D++ object
    L3DPP::Line3D* Line3D = new L3DPP::Line3D(outputFolder,loadAndStore,maxWidth,
                                              maxNumSegments,true,useGPU);

    // read NVM file
    std::ifstream nvm_file;
    nvm_file.open(nvmFile.c_str());

    std::string nvm_line;
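    // note: an NVM file starts with the "NVM_V3" header line followed by an empty line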
    std::getline(nvm_file,nvm_line); // ignore first line...
    std::getline(nvm_file,nvm_line); // ignore second line...

    // read number of images
    std::getline(nvm_file,nvm_line);
    std::stringstream nvm_stream(nvm_line);
    unsigned int num_cams;
    nvm_stream >> num_cams;

    if(num_cams == 0)
    {
        std::cerr << "No aligned cameras in NVM file!" << std::endl;
        return -1;
    }

    // read camera data (sequentially)
    std::vector<std::string> cams_imgFilenames(num_cams);
    std::vector<float> cams_focals(num_cams);
    std::vector<Eigen::Matrix3d> cams_rotation(num_cams);
    std::vector<Eigen::Vector3d> cams_translation(num_cams);
    std::vector<Eigen::Vector3d> cams_centers(num_cams);
    std::vector<float> cams_distortion(num_cams);
    for(unsigned int i=0; i<num_cams; ++i)
    {
        std::getline(nvm_file,nvm_line);

        // image filename
        std::string filename;

        // focal_length,quaternion,center,distortion
        double focal_length,qx,qy,qz,qw;
        double Cx,Cy,Cz,dist;

        nvm_stream.str("");
        nvm_stream.clear();
        nvm_stream.str(nvm_line);
        nvm_stream >> filename >> focal_length >> qw >> qx >> qy >> qz;
        nvm_stream >> Cx >> Cy >> Cz >> dist;

        cams_imgFilenames[i] = filename;
        cams_focals[i] = focal_length;
        cams_distortion[i] = dist;

        // rotation and translation
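        // the NVM camera entry stores the rotation as a unit quaternion (qw,qx,qy,qz);
        // the assignments below are the standard quaternion-to-rotation-matrix expansion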
        Eigen::Matrix3d R;
        R(0,0) = 1.0-2.0*qy*qy-2.0*qz*qz;
        R(0,1) = 2.0*qx*qy-2.0*qz*qw;
        R(0,2) = 2.0*qx*qz+2.0*qy*qw;

        R(1,0) = 2.0*qx*qy+2.0*qz*qw;
        R(1,1) = 1.0-2.0*qx*qx-2.0*qz*qz;
        R(1,2) = 2.0*qy*qz-2.0*qx*qw;

        R(2,0) = 2.0*qx*qz-2.0*qy*qw;
        R(2,1) = 2.0*qy*qz+2.0*qx*qw;
        R(2,2) = 1.0-2.0*qx*qx-2.0*qy*qy;

        Eigen::Vector3d C(Cx,Cy,Cz);
        cams_centers[i] = C;
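        // NVM stores the camera center C rather than the translation;
        // with x_cam = R*(x_world - C), the translation is t = -R*C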
        Eigen::Vector3d t = -R*C;

        cams_translation[i] = t;
        cams_rotation[i] = R;
    }

    // read number of 3D points
    std::getline(nvm_file,nvm_line); // ignore line...
    std::getline(nvm_file,nvm_line);
    nvm_stream.str("");
    nvm_stream.clear();
    nvm_stream.str(nvm_line);
    unsigned int num_points;
    nvm_stream >> num_points;

    // read features (for image similarity calculation)
    std::vector<std::list<unsigned int> > cams_worldpointIDs(num_cams);
    std::vector<std::vector<float> > cams_worldpointDepths(num_cams);
    for(unsigned int i=0; i<num_points; ++i)
    {
        // 3D position
        std::getline(nvm_file,nvm_line);
        std::istringstream iss_point3D(nvm_line);
        double px,py,pz,colR,colG,colB;
        iss_point3D >> px >> py >> pz;
        iss_point3D >> colR >> colG >> colB;
        Eigen::Vector3d pos3D(px,py,pz);

        // measurements
        unsigned int num_views;
        iss_point3D >> num_views;

        unsigned int camID,siftID;
        float posX,posY;
        for(unsigned int j=0; j<num_views; ++j)
        {
            iss_point3D >> camID >> siftID;
            iss_point3D >> posX >> posY;
            cams_worldpointIDs[camID].push_back(i);
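            // also store the distance between the camera center and the 3D point;
            // the per-camera median of these depths is computed further below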

            cams_worldpointDepths[camID].push_back((pos3D-cams_centers[camID]).norm());
        }
    }
    nvm_file.close();

    // load images (parallel)
#ifdef L3DPP_OPENMP
    #pragma omp parallel for
#endif //L3DPP_OPENMP
    for(unsigned int i=0; i<num_cams; ++i)
    {
        if(cams_worldpointDepths[i].size() > 0)
        {
            // parse filename
            std::string fname = cams_imgFilenames[i];
            boost::filesystem::path img_path(fname);

            // load image
            cv::Mat image;
            if(use_full_image_path)
                image = cv::imread(inputFolder+"/"+fname,CV_LOAD_IMAGE_GRAYSCALE);
            else
                image = cv::imread(inputFolder+"/"+img_path.filename().string(),CV_LOAD_IMAGE_GRAYSCALE);

            // setup intrinsics
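            // the principal point is not part of the NVM camera entry,
            // so it is assumed to lie at the image center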
            float px = float(image.cols)/2.0f;
            float py = float(image.rows)/2.0f;
            float f = cams_focals[i];

            Eigen::Matrix3d K = Eigen::Matrix3d::Zero();
            K(0,0) = f;
            K(1,1) = f;
            K(0,2) = px;
            K(1,2) = py;
            K(2,2) = 1.0;

            // undistort (if necessary)
            float d = cams_distortion[i];

            cv::Mat img_undist;
            if(fabs(d) > L3D_EPS)
            {
                // undistorting
                Eigen::Vector3d radial(-d,0.0,0.0);
                Eigen::Vector2d tangential(0.0,0.0);
                Line3D->undistortImage(image,img_undist,radial,tangential,K);
            }
            else
            {
                // already undistorted
                img_undist = image;
            }

            // median point depth
            std::sort(cams_worldpointDepths[i].begin(),cams_worldpointDepths[i].end());
            size_t med_pos = cams_worldpointDepths[i].size()/2;
            float med_depth = cams_worldpointDepths[i].at(med_pos);

            // add to system
            Line3D->addImage(i,img_undist,K,cams_rotation[i],
                             cams_translation[i],
                             med_depth,cams_worldpointIDs[i]);
        }
    }

    // match images
    Line3D->matchImages(sigmaP,sigmaA,neighbors,epipolarOverlap,
                        minBaseline,kNN,constRegDepth);

    // compute result
    Line3D->reconstruct3Dlines(visibility_t,diffusion,collinearity,useCERES);

    // save end result
    std::vector<L3DPP::FinalLine3D> result;
    Line3D->get3Dlines(result);

    // save as STL
    Line3D->saveResultAsSTL(outputFolder);
    // save as OBJ
    Line3D->saveResultAsOBJ(outputFolder);
    // save as TXT
    Line3D->save3DLinesAsTXT(outputFolder);

    // cleanup
    delete Line3D;
}
Example #2
int main(int argc, char *argv[])
{
    TCLAP::CmdLine cmd("LINE3D++");

    TCLAP::ValueArg<std::string> inputArg("i", "input_folder", "folder containing the original images", true, ".", "string");
    cmd.add(inputArg);

    TCLAP::ValueArg<std::string> jsonArg("j", "sfm_json_file", "full path to the OpenMVG result file (sfm_data.json)", true, ".", "string");
    cmd.add(jsonArg);

    TCLAP::ValueArg<std::string> outputArg("o", "output_folder", "folder where result and temporary files are stored (if not specified --> input_folder+'/Line3D++/')", false, "", "string");
    cmd.add(outputArg);

    TCLAP::ValueArg<int> scaleArg("w", "max_image_width", "scale image down to fixed max width for line segment detection", false, L3D_DEF_MAX_IMG_WIDTH, "int");
    cmd.add(scaleArg);

    TCLAP::ValueArg<int> neighborArg("n", "num_matching_neighbors", "number of neighbors for matching (-1 --> use all)", false, L3D_DEF_MATCHING_NEIGHBORS, "int");
    cmd.add(neighborArg);

    TCLAP::ValueArg<float> sigma_A_Arg("a", "sigma_a", "angle regularizer", false, L3D_DEF_SCORING_ANG_REGULARIZER, "float");
    cmd.add(sigma_A_Arg);

    TCLAP::ValueArg<float> sigma_P_Arg("p", "sigma_p", "position regularizer (if negative: fixed sigma_p in world-coordinates)", false, L3D_DEF_SCORING_POS_REGULARIZER, "float");
    cmd.add(sigma_P_Arg);

    TCLAP::ValueArg<float> epipolarArg("e", "min_epipolar_overlap", "minimum epipolar overlap for matching", false, L3D_DEF_EPIPOLAR_OVERLAP, "float");
    cmd.add(epipolarArg);

    TCLAP::ValueArg<int> knnArg("k", "knn_matches", "number of matches to be kept (<= 0 --> use all that fulfill overlap)", false, L3D_DEF_KNN, "int");
    cmd.add(knnArg);

    TCLAP::ValueArg<int> segNumArg("y", "num_segments_per_image", "maximum number of 2D segments per image (longest)", false, L3D_DEF_MAX_NUM_SEGMENTS, "int");
    cmd.add(segNumArg);

    TCLAP::ValueArg<int> visibilityArg("v", "visibility_t", "minimum number of cameras to see a valid 3D line", false, L3D_DEF_MIN_VISIBILITY_T, "int");
    cmd.add(visibilityArg);

    TCLAP::ValueArg<bool> diffusionArg("d", "diffusion", "perform Replicator Dynamics Diffusion before clustering", false, L3D_DEF_PERFORM_RDD, "bool");
    cmd.add(diffusionArg);

    TCLAP::ValueArg<bool> loadArg("l", "load_and_store_flag", "load/store segments (recommended for big images)", false, L3D_DEF_LOAD_AND_STORE_SEGMENTS, "bool");
    cmd.add(loadArg);

    TCLAP::ValueArg<float> collinArg("r", "collinearity_t", "threshold for collinearity", false, L3D_DEF_COLLINEARITY_T, "float");
    cmd.add(collinArg);

    TCLAP::ValueArg<bool> cudaArg("g", "use_cuda", "use the GPU (CUDA)", false, true, "bool");
    cmd.add(cudaArg);

    TCLAP::ValueArg<bool> ceresArg("c", "use_ceres", "use CERES (for 3D line optimization)", false, L3D_DEF_USE_CERES, "bool");
    cmd.add(ceresArg);

    TCLAP::ValueArg<float> constRegDepthArg("z", "const_reg_depth", "use a constant regularization depth (only when sigma_p is metric!)", false, -1.0f, "float");
    cmd.add(constRegDepthArg);

    // read arguments
    cmd.parse(argc,argv);
    std::string inputFolder = inputArg.getValue();
    std::string jsonFile = jsonArg.getValue();
    std::string outputFolder = outputArg.getValue();
    if(outputFolder.length() == 0)
        outputFolder = inputFolder+"/Line3D++/";

    int maxWidth = scaleArg.getValue();
    unsigned int neighbors = std::max(neighborArg.getValue(),2);
    bool diffusion = diffusionArg.getValue();
    bool loadAndStore = loadArg.getValue();
    float collinearity = collinArg.getValue();
    bool useGPU = cudaArg.getValue();
    bool useCERES = ceresArg.getValue();
    float epipolarOverlap = fmin(fabs(epipolarArg.getValue()),0.99f);
    float sigmaA = fabs(sigma_A_Arg.getValue());
    float sigmaP = sigma_P_Arg.getValue();
    int kNN = knnArg.getValue();
    unsigned int maxNumSegments = segNumArg.getValue();
    unsigned int visibility_t = visibilityArg.getValue();
    float constRegDepth = constRegDepthArg.getValue();

    // check if json file exists
    boost::filesystem::path json(jsonFile);
    if(!boost::filesystem::exists(json))
    {
        std::cerr << "OpenMVG json file " << jsonFile << " does not exist!" << std::endl;
        return -1;
    }

    // create output directory
    boost::filesystem::path dir(outputFolder);
    boost::filesystem::create_directory(dir);

    // create Line3D++ object
    L3DPP::Line3D* Line3D = new L3DPP::Line3D(outputFolder,loadAndStore,maxWidth,
                                              maxNumSegments,true,useGPU);

    // parse json file
    std::ifstream jsonFileIFS(jsonFile.c_str());
    std::string str((std::istreambuf_iterator<char>(jsonFileIFS)),
                     std::istreambuf_iterator<char>());
    rapidjson::Document d;
    d.Parse(str.c_str());

    rapidjson::Value& s = d["views"];
    size_t num_cams =  s.Size();

    if(num_cams == 0)
    {
        std::cerr << "No aligned cameras in json file!" << std::endl;
        return -1;
    }

    // read image IDs and filename (sequentially)
    std::vector<std::string> cams_imgFilenames(num_cams);
    std::vector<unsigned int> cams_intrinsic_IDs(num_cams);
    std::vector<unsigned int> cams_view_IDs(num_cams);
    std::vector<unsigned int> cams_pose_IDs(num_cams);
    std::vector<bool> img_found(num_cams);
    std::map<unsigned int,unsigned int> pose2view;
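    // pose2view maps OpenMVG pose IDs to view IDs, so that the extrinsics
    // (which are keyed by pose ID) can later be assigned to the correct view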

    for(rapidjson::SizeType i=0; i<s.Size(); ++i)
    {
        rapidjson::Value& array_element = s[i];
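        // the actual view record is nested under value->ptr_wrapper->data
        // (a result of OpenMVG's cereal-based JSON serialization)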
        rapidjson::Value& view_data = array_element["value"]["ptr_wrapper"]["data"];

        std::string filename = view_data["filename"].GetString();
        unsigned int view_id = view_data["id_view"].GetUint();
        unsigned int intrinsic_id = view_data["id_intrinsic"].GetUint();
        unsigned int pose_id = view_data["id_pose"].GetUint();

        std::string full_path = inputFolder+"/"+filename;
        boost::filesystem::path full_path_check(full_path);
        if(boost::filesystem::exists(full_path_check))
        {
            // image exists
            cams_imgFilenames[i] = full_path;
            cams_view_IDs[i] = view_id;
            cams_intrinsic_IDs[i] = intrinsic_id;
            cams_pose_IDs[i] = pose_id;
            img_found[i] = true;

            pose2view[pose_id] = view_id;
        }
        else
        {
            // image not found...
            img_found[i] = false;
            std::cerr << "WARNING: image '" << filename << "' not found (ID=" << view_id << ")" << std::endl;
        }
    }

    // read intrinsics (sequentially)
    std::map<unsigned int,Eigen::Vector3d> radial_dist;
    std::map<unsigned int,Eigen::Vector2d> tangential_dist;
    std::map<unsigned int,Eigen::Matrix3d> K_matrices;
    std::map<unsigned int,bool> is_distorted;

    rapidjson::Value& intr = d["intrinsics"];
    size_t num_intrinsics =  intr.Size();

    if(num_intrinsics == 0)
    {
        std::cerr << "No intrinsics in json file!" << std::endl;
        return -1;
    }

    std::string cam_model;
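    // "polymorphic_name" (the camera model) is only written when a model type first
    // appears in the file, so the last seen value is reused for subsequent intrinsics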
    for(rapidjson::SizeType i=0; i<intr.Size(); ++i)
    {
        rapidjson::Value& array_element = intr[i];
        rapidjson::Value& intr_data = array_element["value"]["ptr_wrapper"]["data"];
        if (array_element["value"].HasMember("polymorphic_name"))
          cam_model = array_element["value"]["polymorphic_name"].GetString();
        unsigned int groupID = array_element["key"].GetUint();

        bool distorted = false;
        Eigen::Vector3d radial_d(0,0,0);
        Eigen::Vector2d tangential_d(0,0);

        double focal_length = intr_data["focal_length"].GetDouble();

        Eigen::Vector2d principle_p;
        principle_p(0) = intr_data["principal_point"][0].GetDouble();
        principle_p(1) = intr_data["principal_point"][1].GetDouble();

        // check camera model for distortion
        if(cam_model.compare("pinhole_radial_k3") == 0)
        {
            // 3 radial
            radial_d(0) = intr_data["disto_k3"][0].GetDouble();
            radial_d(1) = intr_data["disto_k3"][1].GetDouble();
            radial_d(2) = intr_data["disto_k3"][2].GetDouble();
        }
        else if(cam_model.compare("pinhole_radial_k1") == 0)
        {
            // 1 radial
            radial_d(0) = intr_data["disto_k1"][0].GetDouble();
        }
        else if(cam_model.compare("pinhole_brown_t2") == 0)
        {
            // 3 radial
            radial_d(0) = intr_data["disto_t2"][0].GetDouble();
            radial_d(1) = intr_data["disto_t2"][1].GetDouble();
            radial_d(2) = intr_data["disto_t2"][2].GetDouble();
            // 2 tangential
            tangential_d(0) = intr_data["disto_t2"][3].GetDouble();
            tangential_d(1) = intr_data["disto_t2"][4].GetDouble();
        }
        else if(cam_model.compare("pinhole") != 0)
        {
            std::cerr << "WARNING: camera model '" << cam_model << "' for group " << groupID << " unknown! No distortion assumed..." << std::endl;
        }

        // check if distortion actually occurred
        if(fabs(radial_d(0)) > L3D_EPS || fabs(radial_d(1)) > L3D_EPS || fabs(radial_d(2)) > L3D_EPS ||
                fabs(tangential_d(0)) > L3D_EPS || fabs(tangential_d(1)) > L3D_EPS)
        {
            distorted = true;
        }

        // create K
        Eigen::Matrix3d K = Eigen::Matrix3d::Zero();
        K(0,0) = focal_length;
        K(1,1) = focal_length;
        K(0,2) = principle_p(0);
        K(1,2) = principle_p(1);
        K(2,2) = 1.0;

        // store
        radial_dist[groupID] = radial_d;
        tangential_dist[groupID] = tangential_d;
        K_matrices[groupID] = K;
        is_distorted[groupID] = distorted;
    }

    // read extrinsics (sequentially)
    std::map<unsigned int,Eigen::Vector3d> translations;
    std::map<unsigned int,Eigen::Vector3d> centers;
    std::map<unsigned int,Eigen::Matrix3d> rotations;

    rapidjson::Value& extr = d["extrinsics"];
    size_t num_extrinsics =  extr.Size();

    if(num_extrinsics == 0)
    {
        std::cerr << "No extrinsics in json file!" << std::endl;
        return -1;
    }

    for(rapidjson::SizeType i=0; i<extr.Size(); ++i)
    {
        rapidjson::Value& array_element = extr[i];
        unsigned int poseID = array_element["key"].GetUint();

        if(pose2view.find(poseID) != pose2view.end())
        {
            unsigned int viewID = pose2view[poseID];

            // rotation
            rapidjson::Value& _R = array_element["value"]["rotation"];
            Eigen::Matrix3d R = Eigen::Matrix3d::Zero();
            R(0,0) = _R[0][0].GetDouble(); R(0,1) = _R[0][1].GetDouble(); R(0,2) = _R[0][2].GetDouble();
            R(1,0) = _R[1][0].GetDouble(); R(1,1) = _R[1][1].GetDouble(); R(1,2) = _R[1][2].GetDouble();
            R(2,0) = _R[2][0].GetDouble(); R(2,1) = _R[2][1].GetDouble(); R(2,2) = _R[2][2].GetDouble();

            // center
            rapidjson::Value& _C = array_element["value"]["center"];
            Eigen::Vector3d C;
            C(0) = _C[0].GetDouble(); C(1) = _C[1].GetDouble(); C(2) = _C[2].GetDouble();

            // translation
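            // OpenMVG stores the camera center; convert it to a translation via t = -R*C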
            Eigen::Vector3d t = -R*C;

            // store
            translations[viewID] = t;
            centers[viewID] = C;
            rotations[viewID] = R;
        }
        else
        {
            std::cerr << "WARNING: pose with ID " << poseID << " does not map to an image!" << std::endl;
        }
    }

    // read worldpoint data (sequentially)
    std::map<unsigned int,std::list<unsigned int> > views2wps;
    std::map<unsigned int,std::vector<float> > views2depths;

    rapidjson::Value& wps = d["structure"];
    size_t num_wps =  wps.Size();

    if(num_wps == 0)
    {
        std::cerr << "No worldpoints in json file!" << std::endl;
        return -1;
    }

    for(rapidjson::SizeType i=0; i<wps.Size(); ++i)
    {
        rapidjson::Value& array_element = wps[i];
        rapidjson::Value& wp_data = array_element["value"];

        // id and position
        unsigned int wpID = array_element["key"].GetUint();
        Eigen::Vector3d X;
        X(0) = wp_data["X"][0].GetDouble();
        X(1) = wp_data["X"][1].GetDouble();
        X(2) = wp_data["X"][2].GetDouble();

        // observations
        size_t num_obs = wp_data["observations"].Size();
        for(size_t j=0; j<num_obs; ++j)
        {
            unsigned int viewID = wp_data["observations"][j]["key"].GetUint();

            if(centers.find(viewID) != centers.end())
            {
                float depth = (centers[viewID]-X).norm();

                // store in list
                views2wps[viewID].push_back(wpID);
                views2depths[viewID].push_back(depth);
            }
        }
    }

    // load images (parallel)
#ifdef L3DPP_OPENMP
    #pragma omp parallel for
#endif //L3DPP_OPENMP
    for(unsigned int i=0; i<num_cams; ++i)
    {
        unsigned int camID = cams_view_IDs[i];
        unsigned int intID = cams_intrinsic_IDs[i];

        if(views2wps.find(camID) != views2wps.end() && img_found[i] &&
                K_matrices.find(intID) != K_matrices.end())
        {
            // load image
            cv::Mat image = cv::imread(cams_imgFilenames[i],CV_LOAD_IMAGE_GRAYSCALE);

            // intrinsics
            Eigen::Matrix3d K = K_matrices[intID];

            // undistort (if necessary)
            bool distorted = is_distorted[intID];
            Eigen::Vector3d radial = radial_dist[intID];
            Eigen::Vector2d tangential = tangential_dist[intID];

            cv::Mat img_undist;
            if(distorted)
            {
                // undistorting
                Line3D->undistortImage(image,img_undist,radial,tangential,K);
            }
            else
            {
                // already undistorted
                img_undist = image;
            }

            // median point depth
            std::sort(views2depths[camID].begin(),views2depths[camID].end());
            size_t med_pos = views2depths[camID].size()/2;
            float med_depth = views2depths[camID].at(med_pos);

            // add to system
            Line3D->addImage(camID,img_undist,K,rotations[camID],
                             translations[camID],
                             med_depth,views2wps[camID]);
        }
    }

    // match images
    Line3D->matchImages(sigmaP,sigmaA,neighbors,epipolarOverlap,
                        kNN,constRegDepth);

    // compute result
    Line3D->reconstruct3Dlines(visibility_t,diffusion,collinearity,useCERES);

    // save end result
    std::vector<L3DPP::FinalLine3D> result;
    Line3D->get3Dlines(result);

    // save as STL
    Line3D->saveResultAsSTL(outputFolder);
    // save as OBJ
    Line3D->saveResultAsOBJ(outputFolder);
    // save as TXT
    Line3D->save3DLinesAsTXT(outputFolder);
    // save as BIN
    Line3D->save3DLinesAsBIN(outputFolder);

    // cleanup
    delete Line3D;
}
Example #3
int main(int argc, char *argv[])
{
    TCLAP::CmdLine cmd("LINE3D++");

    TCLAP::ValueArg<std::string> inputArg("i", "input_folder", "folder containing the images", true, "", "string");
    cmd.add(inputArg);

    TCLAP::ValueArg<std::string> sfmArg("m", "sfm_folder", "full path to the colmap result files (cameras.txt, images.txt, and points3D.txt), if not specified --> input_folder", false, "", "string");
    cmd.add(sfmArg);

    TCLAP::ValueArg<std::string> outputArg("o", "output_folder", "folder where result and temporary files are stored (if not specified --> sfm_folder+'/Line3D++/')", false, "", "string");
    cmd.add(outputArg);

    TCLAP::ValueArg<int> scaleArg("w", "max_image_width", "scale image down to fixed max width for line segment detection", false, L3D_DEF_MAX_IMG_WIDTH, "int");
    cmd.add(scaleArg);

    TCLAP::ValueArg<int> neighborArg("n", "num_matching_neighbors", "number of neighbors for matching", false, L3D_DEF_MATCHING_NEIGHBORS, "int");
    cmd.add(neighborArg);

    TCLAP::ValueArg<float> sigma_A_Arg("a", "sigma_a", "angle regularizer", false, L3D_DEF_SCORING_ANG_REGULARIZER, "float");
    cmd.add(sigma_A_Arg);

    TCLAP::ValueArg<float> sigma_P_Arg("p", "sigma_p", "position regularizer (if negative: fixed sigma_p in world-coordinates)", false, L3D_DEF_SCORING_POS_REGULARIZER, "float");
    cmd.add(sigma_P_Arg);

    TCLAP::ValueArg<float> epipolarArg("e", "min_epipolar_overlap", "minimum epipolar overlap for matching", false, L3D_DEF_EPIPOLAR_OVERLAP, "float");
    cmd.add(epipolarArg);

    TCLAP::ValueArg<int> knnArg("k", "knn_matches", "number of matches to be kept (<= 0 --> use all that fulfill overlap)", false, L3D_DEF_KNN, "int");
    cmd.add(knnArg);

    TCLAP::ValueArg<int> segNumArg("y", "num_segments_per_image", "maximum number of 2D segments per image (longest)", false, L3D_DEF_MAX_NUM_SEGMENTS, "int");
    cmd.add(segNumArg);

    TCLAP::ValueArg<int> visibilityArg("v", "visibility_t", "minimum number of cameras to see a valid 3D line", false, L3D_DEF_MIN_VISIBILITY_T, "int");
    cmd.add(visibilityArg);

    TCLAP::ValueArg<bool> diffusionArg("d", "diffusion", "perform Replicator Dynamics Diffusion before clustering", false, L3D_DEF_PERFORM_RDD, "bool");
    cmd.add(diffusionArg);

    TCLAP::ValueArg<bool> loadArg("l", "load_and_store_flag", "load/store segments (recommended for big images)", false, L3D_DEF_LOAD_AND_STORE_SEGMENTS, "bool");
    cmd.add(loadArg);

    TCLAP::ValueArg<float> collinArg("r", "collinearity_t", "threshold for collinearity", false, L3D_DEF_COLLINEARITY_T, "float");
    cmd.add(collinArg);

    TCLAP::ValueArg<bool> cudaArg("g", "use_cuda", "use the GPU (CUDA)", false, true, "bool");
    cmd.add(cudaArg);

    TCLAP::ValueArg<bool> ceresArg("c", "use_ceres", "use CERES (for 3D line optimization)", false, L3D_DEF_USE_CERES, "bool");
    cmd.add(ceresArg);

    TCLAP::ValueArg<float> minBaselineArg("x", "min_image_baseline", "minimum baseline between matching images (world space)", false, L3D_DEF_MIN_BASELINE, "float");
    cmd.add(minBaselineArg);

    TCLAP::ValueArg<float> constRegDepthArg("z", "const_reg_depth", "use a constant regularization depth (only when sigma_p is metric!)", false, -1.0f, "float");
    cmd.add(constRegDepthArg);

    // read arguments
    cmd.parse(argc,argv);
    std::string inputFolder = inputArg.getValue();
    std::string sfmFolder = sfmArg.getValue();

    if(sfmFolder.length() == 0)
        sfmFolder = inputFolder;

    // check if colmap result folder
    boost::filesystem::path sfm(sfmFolder);
    if(!boost::filesystem::exists(sfm))
    {
        std::cerr << "colmap result folder " << sfm << " does not exist!" << std::endl;
        return -1;
    }

    std::string outputFolder = outputArg.getValue();
    if(outputFolder.length() == 0)
        outputFolder = sfmFolder+"/Line3D++/";

    int maxWidth = scaleArg.getValue();
    unsigned int neighbors = std::max(neighborArg.getValue(),2);
    bool diffusion = diffusionArg.getValue();
    bool loadAndStore = loadArg.getValue();
    float collinearity = collinArg.getValue();
    bool useGPU = cudaArg.getValue();
    bool useCERES = ceresArg.getValue();
    float epipolarOverlap = fmin(fabs(epipolarArg.getValue()),0.99f);
    float sigmaA = fabs(sigma_A_Arg.getValue());
    float sigmaP = sigma_P_Arg.getValue();
    float minBaseline = fabs(minBaselineArg.getValue());
    int kNN = knnArg.getValue();
    unsigned int maxNumSegments = segNumArg.getValue();
    unsigned int visibility_t = visibilityArg.getValue();
    float constRegDepth = constRegDepthArg.getValue();

    // create output directory
    boost::filesystem::path dir(outputFolder);
    boost::filesystem::create_directory(dir);

    // create Line3D++ object
    L3DPP::Line3D* Line3D = new L3DPP::Line3D(outputFolder,loadAndStore,maxWidth,
                                              maxNumSegments,true,useGPU);

    // check if result files exist
    boost::filesystem::path sfm_cameras(sfmFolder+"/cameras.txt");
    boost::filesystem::path sfm_images(sfmFolder+"/images.txt");
    boost::filesystem::path sfm_points3D(sfmFolder+"/points3D.txt");
    if(!boost::filesystem::exists(sfm_cameras) || !boost::filesystem::exists(sfm_images) ||
            !boost::filesystem::exists(sfm_points3D))
    {
        std::cerr << "at least one of the colmap result files does not exist in sfm folder: " << sfm << std::endl;
        return -2;
    }

    std::cout << std::endl << "reading colmap result..." << std::endl;

    // read cameras.txt
    std::ifstream cameras_file;
    cameras_file.open(sfm_cameras.c_str());
    std::string cameras_line;

    std::map<unsigned int,Eigen::Matrix3d> cams_K;
    std::map<unsigned int,Eigen::Vector3d> cams_radial;
    std::map<unsigned int,Eigen::Vector2d> cams_tangential;

    while(std::getline(cameras_file,cameras_line))
    {
        // check first character for a comment (#)
        if(cameras_line.substr(0,1).compare("#") != 0)
        {
            std::stringstream cameras_stream(cameras_line);

            unsigned int camID,width,height;
            std::string model;

            // parse essential data
            cameras_stream >> camID >> model >> width >> height;
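            // the remaining tokens are the intrinsic parameters; their number and
            // meaning depend on the camera model and are parsed below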

            double fx,fy,cx,cy,k1,k2,k3,p1,p2;

            // check camera model
            if(model.compare("SIMPLE_PINHOLE") == 0)
            {
                // f,cx,cy
                cameras_stream >> fx >> cx >> cy;
                fy = fx;
                k1 = 0; k2 = 0; k3 = 0;
                p1 = 0; p2 = 0;
            }
            else if(model.compare("PINHOLE") == 0)
Example #4
int main(int argc, char *argv[])
{
    TCLAP::CmdLine cmd("LINE3D++");

    TCLAP::ValueArg<std::string> inputArg("i", "input_folder", "folder containing the images", true, ".", "string");
    cmd.add(inputArg);

    TCLAP::ValueArg<std::string> mavmapArg("b", "mavmap_output", "full path to the mavmap output (image-data-*.txt)", true, "", "string");
    cmd.add(mavmapArg);

    TCLAP::ValueArg<std::string> extArg("t", "image_extension", "image extension (case sensitive), if not specified: jpg, png or bmp expected", false, "", "string");
    cmd.add(extArg);

    TCLAP::ValueArg<std::string> prefixArg("f", "image_prefix", "optional image prefix", false, "", "string");
    cmd.add(prefixArg);

    TCLAP::ValueArg<std::string> outputArg("o", "output_folder", "folder where result and temporary files are stored (if not specified --> input_folder+'/Line3D++/')", false, "", "string");
    cmd.add(outputArg);

    TCLAP::ValueArg<int> scaleArg("w", "max_image_width", "scale image down to fixed max width for line segment detection", false, L3D_DEF_MAX_IMG_WIDTH, "int");
    cmd.add(scaleArg);

    TCLAP::ValueArg<int> neighborArg("n", "num_matching_neighbors", "number of neighbors for matching", false, L3D_DEF_MATCHING_NEIGHBORS, "int");
    cmd.add(neighborArg);

    TCLAP::ValueArg<float> sigma_A_Arg("a", "sigma_a", "angle regularizer", false, L3D_DEF_SCORING_ANG_REGULARIZER, "float");
    cmd.add(sigma_A_Arg);

    TCLAP::ValueArg<float> sigma_P_Arg("p", "sigma_p", "position regularizer (if negative: fixed sigma_p in world-coordinates)", false, L3D_DEF_SCORING_POS_REGULARIZER, "float");
    cmd.add(sigma_P_Arg);

    TCLAP::ValueArg<float> epipolarArg("e", "min_epipolar_overlap", "minimum epipolar overlap for matching", false, L3D_DEF_EPIPOLAR_OVERLAP, "float");
    cmd.add(epipolarArg);

    TCLAP::ValueArg<int> knnArg("k", "knn_matches", "number of matches to be kept (<= 0 --> use all that fulfill overlap)", false, L3D_DEF_KNN, "int");
    cmd.add(knnArg);

    TCLAP::ValueArg<int> segNumArg("y", "num_segments_per_image", "maximum number of 2D segments per image (longest)", false, L3D_DEF_MAX_NUM_SEGMENTS, "int");
    cmd.add(segNumArg);

    TCLAP::ValueArg<int> visibilityArg("v", "visibility_t", "minimum number of cameras to see a valid 3D line", false, L3D_DEF_MIN_VISIBILITY_T, "int");
    cmd.add(visibilityArg);

    TCLAP::ValueArg<bool> diffusionArg("d", "diffusion", "perform Replicator Dynamics Diffusion before clustering", false, L3D_DEF_PERFORM_RDD, "bool");
    cmd.add(diffusionArg);

    TCLAP::ValueArg<bool> loadArg("l", "load_and_store_flag", "load/store segments (recommended for big images)", false, L3D_DEF_LOAD_AND_STORE_SEGMENTS, "bool");
    cmd.add(loadArg);

    TCLAP::ValueArg<float> collinArg("r", "collinearity_t", "threshold for collinearity", false, L3D_DEF_COLLINEARITY_T, "float");
    cmd.add(collinArg);

    TCLAP::ValueArg<bool> cudaArg("g", "use_cuda", "use the GPU (CUDA)", false, true, "bool");
    cmd.add(cudaArg);

    TCLAP::ValueArg<bool> ceresArg("c", "use_ceres", "use CERES (for 3D line optimization)", false, L3D_DEF_USE_CERES, "bool");
    cmd.add(ceresArg);

    TCLAP::ValueArg<float> constRegDepthArg("z", "const_reg_depth", "use a constant regularization depth (only when sigma_p is metric!)", false, -1.0f, "float");
    cmd.add(constRegDepthArg);

    // read arguments
    cmd.parse(argc,argv);
    std::string inputFolder = inputArg.getValue();
    std::string mavmapFile = mavmapArg.getValue();
    std::string outputFolder = outputArg.getValue();
    std::string imgExtension = extArg.getValue();
    std::string imgPrefix = prefixArg.getValue();
    if(outputFolder.length() == 0)
        outputFolder = inputFolder+"/Line3D++/";

    int maxWidth = scaleArg.getValue();
    unsigned int neighbors = std::max(neighborArg.getValue(),2);
    bool diffusion = diffusionArg.getValue();
    bool loadAndStore = loadArg.getValue();
    float collinearity = collinArg.getValue();
    bool useGPU = cudaArg.getValue();
    bool useCERES = ceresArg.getValue();
    float epipolarOverlap = fmin(fabs(epipolarArg.getValue()),0.99f);
    float sigmaA = fabs(sigma_A_Arg.getValue());
    float sigmaP = sigma_P_Arg.getValue();
    int kNN = knnArg.getValue();
    unsigned int maxNumSegments = segNumArg.getValue();
    unsigned int visibility_t = visibilityArg.getValue();
    float constRegDepth = constRegDepthArg.getValue();

    // prepend a '.' only if an extension was actually specified
    // (an empty extension means "try common image extensions" below)
    if(imgExtension.length() > 0 && imgExtension.substr(0,1) != ".")
        imgExtension = "."+imgExtension;

    // since no median depth can be computed without camera-to-worldpoint links
    // sigma_p MUST be positive and in pixels!
    if(sigmaP < L3D_EPS && constRegDepth < L3D_EPS)
    {
        std::cout << "sigma_p cannot be negative (i.e. in world coordiantes) when no valid regularization depth (--const_reg_depth) is given!" << std::endl;
        std::cout << "reverting to: sigma_p = 2.5px" << std::endl;
        sigmaP = 2.5f;
    }

    // check if mavmap file exists
    boost::filesystem::path bf(mavmapFile);
    if(!boost::filesystem::exists(bf))
    {
        std::cerr << "mavmap file '" << mavmapFile << "' does not exist!" << std::endl;
        return -1;
    }

    // create output directory
    boost::filesystem::path dir(outputFolder);
    boost::filesystem::create_directory(dir);

    // create Line3D++ object
    L3DPP::Line3D* Line3D = new L3DPP::Line3D(outputFolder,loadAndStore,maxWidth,
                                              maxNumSegments,false,useGPU);

    // read mavmap result
    std::ifstream mavmap_file;
    mavmap_file.open(mavmapFile.c_str());

    std::string mavmap_line;
    // check for comments...
    while(std::getline(mavmap_file,mavmap_line))
    {
        if(mavmap_line.substr(0,1) != "#")
            break;
    }

    // read camera data (sequentially)
    std::vector<std::string> cams_filenames;
    std::vector<std::pair<double,double> > cams_focals;
    std::vector<Eigen::Matrix3d> cams_rotation;
    std::vector<Eigen::Vector3d> cams_translation;
    std::vector<Eigen::Vector2d> cams_principle;
    do
    {
        if(mavmap_line.length() < 28)
            break;

        std::string filename,roll,pitch,yaw;
        std::string lat,lon,alt,h;
        std::string tx,ty,tz;
        std::string camID,camModel,fx,fy,cx,cy;

        std::stringstream mavmap_stream(mavmap_line);
        mavmap_stream >> filename >> roll >> pitch >> yaw;
        mavmap_stream >> lat >> lon >> alt >> h;
        mavmap_stream >> tx >> ty >> tz;
        mavmap_stream >> camID >> camModel >> fx >> fy >> cx >> cy;

        // check camera model
        if(camModel.substr(0,camModel.length()-1) != "PINHOLE")
        {
            std::cout << "only PINHOLE camera model supported..." << std::endl;
            return -1;
        }

        // filename
        cams_filenames.push_back(filename.substr(0,filename.length()-1));

        // rotation
        double r,y,p;
        std::stringstream  dat_stream(roll.substr(0,roll.length()-1));
        dat_stream >> r; dat_stream.str(""); dat_stream.clear();
        dat_stream.str(pitch.substr(0,pitch.length()-1));
        dat_stream >> p; dat_stream.str(""); dat_stream.clear();
        dat_stream.str(yaw.substr(0,yaw.length()-1));
        dat_stream >> y; dat_stream.str(""); dat_stream.clear();

        Eigen::Matrix3d R = Line3D->rotationFromRPY(r,p,y);

        // translation
        double Tx,Ty,Tz;
        dat_stream.str(tx.substr(0,tx.length()-1));
        dat_stream >> Tx; dat_stream.str(""); dat_stream.clear();
        dat_stream.str(ty.substr(0,ty.length()-1));
        dat_stream >> Ty; dat_stream.str(""); dat_stream.clear();
        dat_stream.str(tz.substr(0,tz.length()-1));
        dat_stream >> Tz; dat_stream.str(""); dat_stream.clear();

        Eigen::Vector3d t(Tx,Ty,Tz);

        // invert
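        // build the homogeneous 4x4 pose [R|t] and invert it, so that R and t are
        // passed to Line3D->addImage() in the same convention as in the other examples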
        Eigen::MatrixXd Rt = Eigen::MatrixXd::Identity(4,4);
        Rt.block<3,3>(0,0) = R;
        Rt.block<3,1>(0,3) = t;
        Eigen::MatrixXd Rt_inv = Rt.inverse().eval().block<3, 4>(0,0);

        R = Rt_inv.block<3,3>(0,0);
        t = Rt_inv.block<3,1>(0,3);

        cams_rotation.push_back(R);
        cams_translation.push_back(t);

        // focal lengths
        double foc_x,foc_y;
        dat_stream.str(fx.substr(0,fx.length()-1));
        dat_stream >> foc_x; dat_stream.str(""); dat_stream.clear();
        dat_stream.str(fy.substr(0,fy.length()-1));
        dat_stream >> foc_y; dat_stream.str(""); dat_stream.clear();

        cams_focals.push_back(std::pair<double,double>(foc_x,foc_y));

        // principal point
        double pp_x,pp_y;
        dat_stream.str(cx.substr(0,cx.length()-1));
        dat_stream >> pp_x; dat_stream.str(""); dat_stream.clear();
        dat_stream.str(cy.substr(0,cy.length()-1));
        dat_stream >> pp_y; dat_stream.str(""); dat_stream.clear();

        cams_principle.push_back(Eigen::Vector2d(pp_x,pp_y));

    }while(std::getline(mavmap_file,mavmap_line));
    mavmap_file.close();

    // load images (parallel)
#ifdef L3DPP_OPENMP
    #pragma omp parallel for
#endif //L3DPP_OPENMP
    for(unsigned int i=0; i<cams_rotation.size(); ++i)
    {
        // load image
        std::string img_filename = imgPrefix+cams_filenames[i];
        cv::Mat image;
        std::vector<boost::filesystem::wpath> possible_imgs;

        if(imgExtension.length() == 0)
        {
            // look for common image extensions
            possible_imgs.push_back(boost::filesystem::wpath(inputFolder+"/"+img_filename+".jpg"));
            possible_imgs.push_back(boost::filesystem::wpath(inputFolder+"/"+img_filename+".JPG"));
            possible_imgs.push_back(boost::filesystem::wpath(inputFolder+"/"+img_filename+".png"));
            possible_imgs.push_back(boost::filesystem::wpath(inputFolder+"/"+img_filename+".PNG"));
            possible_imgs.push_back(boost::filesystem::wpath(inputFolder+"/"+img_filename+".jpeg"));
            possible_imgs.push_back(boost::filesystem::wpath(inputFolder+"/"+img_filename+".JPEG"));
            possible_imgs.push_back(boost::filesystem::wpath(inputFolder+"/"+img_filename+".bmp"));
            possible_imgs.push_back(boost::filesystem::wpath(inputFolder+"/"+img_filename+".BMP"));
        }
        else
        {
            // use given extension
            possible_imgs.push_back(boost::filesystem::wpath(inputFolder+"/"+img_filename+imgExtension));
        }

        bool image_found = false;
        unsigned int pos = 0;
        while(!image_found && pos < possible_imgs.size())
        {
            if(boost::filesystem::exists(possible_imgs[pos]))
            {
                image_found = true;
                img_filename = possible_imgs[pos].string();
            }
            ++pos;
        }

        if(image_found)
        {
            // load image
            image = cv::imread(img_filename,CV_LOAD_IMAGE_GRAYSCALE);

            // setup intrinsics
            double px = cams_principle[i].x();
            double py = cams_principle[i].y();
            double fx = cams_focals[i].first;
            double fy = cams_focals[i].second;

            Eigen::Matrix3d K = Eigen::Matrix3d::Zero();
            K(0,0) = fx;
            K(1,1) = fy;
            K(0,2) = px;
            K(1,2) = py;
            K(2,2) = 1.0;

            // set neighbors
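            // mavmap provides no camera-to-worldpoint links, so the matching neighbors
            // are simply the sequentially adjacent images (up to neighbors/2 before
            // this image, the remainder after it)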
            std::list<unsigned int> neighbor_list;
            size_t n_before = neighbors/2;
            for(int nID=int(i)-1; nID >= 0 && neighbor_list.size()<n_before; --nID)
            {
                neighbor_list.push_back(nID);
            }
            for(int nID=int(i)+1; nID < int(cams_rotation.size()) && neighbor_list.size() < neighbors; ++nID)
            {
                neighbor_list.push_back(nID);
            }

            // add to system
            Line3D->addImage(i,image,K,cams_rotation[i],
                             cams_translation[i],constRegDepth,neighbor_list);
        }
    }

    // match images
    Line3D->matchImages(sigmaP,sigmaA,neighbors,epipolarOverlap,
                        kNN,constRegDepth);

    // compute result
    Line3D->reconstruct3Dlines(visibility_t,diffusion,collinearity,useCERES);

    // save end result
    std::vector<L3DPP::FinalLine3D> result;
    Line3D->get3Dlines(result);

    // save as STL
    Line3D->saveResultAsSTL(outputFolder);
    // save as OBJ
    Line3D->saveResultAsOBJ(outputFolder);
    // save as TXT
    Line3D->save3DLinesAsTXT(outputFolder);
    // save as BIN
    Line3D->save3DLinesAsBIN(outputFolder);

    // cleanup
    delete Line3D;
}