Example #1
File: myjni.cpp Project: gitHugLin/room1
JNIEXPORT jfloatArray JNICALL calHomography(JNIEnv *env, jobject obj,jlong grayMatPtr,jboolean first)
{
    jfloatArray mHomography;
    mHomography = env->NewFloatArray(9);
    float prtHomography[9] = {1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0};
    Mat *texture = (Mat *)grayMatPtr;

    if(first == JNI_TRUE){
        standardMat = *texture;
        env->SetFloatArrayRegion(mHomography, 0, 9, prtHomography);
        return mHomography;
    }
    else{
        normalMat = *texture;
        GetHomography homography(normalMat,standardMat);
        Mat Homography = homography.getHomography();
        if(!Homography.empty()){
            // Homography is stored as 64-bit doubles; narrow each element to float.
            double *ptr = (double *)Homography.data;
            for(int j = 0; j < 9; j++)
            {
                prtHomography[j] = (float)ptr[j];
            }
        }
        else{
            prtHomography[0] = -1;
        }
/*        LOGE("Homography data analysis :\n");
        for(int j = 0;j < 9; j++)
            LOGE("Homography: Homography = %lf\n", prtHomography[j]);*/
        //imwrite("/mnt/internal_sd/APCamera/gray.jpg",*texture );
        env->SetFloatArrayRegion(mHomography, 0, 9, prtHomography);
        return mHomography;
    }
}
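Note that calHomography does not follow the Java_<package>_<class>_<method> naming convention, so it has to be registered dynamically. A minimal registration sketch, assuming JNI 1.6 and a placeholder Java class "com/example/Room1" (not taken from the project):

static JNINativeMethod gMethods[] = {
    // "(JZ)[F" = (jlong, jboolean) -> jfloatArray, matching calHomography above
    {"calHomography", "(JZ)[F", (void *)calHomography},
};

jint JNI_OnLoad(JavaVM *vm, void *reserved)
{
    JNIEnv *env = NULL;
    if (vm->GetEnv((void **)&env, JNI_VERSION_1_6) != JNI_OK)
        return JNI_ERR;
    jclass clazz = env->FindClass("com/example/Room1"); // placeholder class name
    if (clazz == NULL || env->RegisterNatives(clazz, gMethods, 1) < 0)
        return JNI_ERR;
    return JNI_VERSION_1_6;
}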
Example #2
void display(void)
{
	GLfloat color[4] = {0.0, 0.8, 0.7, 1.0}; // sphere color
	glClearColor(1.0, 1.0, 1.0, 1.0); // background color
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	glEnable(GL_DEPTH_TEST);
	glEnable(GL_LIGHTING);
	glEnable(GL_LIGHT0);

	//glViewport(-640,0,1280,800);
	//glLoadIdentity();
	//gluPerspective( 151.927 , 1280/800 ,0.01 , 100);
	//gluLookAt(0.0, 0.0, 100.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0); // viewpoint
	glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, color);

	u = 10.0 * count; v = 10.0*800.0/1280.0*count;
	//u=391.0;
	//v=265.0;
	homography(u,v);
	//std::cout <<u << std::endl;
	double x = (u/640.0-1.0)*(1280.0/800.0) ;
	double y = (v/400.0-1.0)*(-1.0);
	glLoadIdentity();
	glTranslated(x,y, z);
	count++;

	glutSolidSphere(0.1, 20, 20); // draw the sphere

	glDisable(GL_DEPTH_TEST);
	glDisable(GL_LIGHTING);
	glDisable(GL_LIGHT0);

	glutSwapBuffers();
}
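The homography(u, v) helper called in display() is not shown. A minimal sketch of what it might do, assuming a global row-major 3x3 matrix double H[9] (the matrix name and storage are assumptions):

// Hypothetical sketch: map (u, v) through a global 3x3 homography H in place.
void homography(double &u, double &v)
{
	double w = H[6]*u + H[7]*v + H[8];           // projective denominator
	double x = (H[0]*u + H[1]*v + H[2]) / w;
	double y = (H[3]*u + H[4]*v + H[5]) / w;
	u = x;
	v = y;
}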
Example #3
void HomographyTransform2D::configure(const std::string& config_file, const std::string& config_key) {

    // Available options
    std::vector<std::string> options {"homography"};
    
    // This will throw cpptoml::parse_exception if a file 
    // with invalid TOML is provided
    cpptoml::table config;
    config = cpptoml::parse_file(config_file);

    // See if a camera configuration was provided
    if (config.contains(config_key)) {

        // Get this component's configuration table
        auto this_config = config.get_table(config_key);

        // Check for unknown options in the table and throw if you find them
        oat::config::checkKeys(options, this_config);
        
        // Homography matrix
        oat::config::Array homo_array;
        if (oat::config::getArray(this_config, "homography", homo_array, 9, true)) {

            auto homo_vec = homo_array->array_of<double>();
            
            homography(0, 0) = homo_vec[0]->get();
            homography(0, 1) = homo_vec[1]->get();
            homography(0, 2) = homo_vec[2]->get();
            homography(1, 0) = homo_vec[3]->get();
            homography(1, 1) = homo_vec[4]->get();
            homography(1, 2) = homo_vec[5]->get();
            homography(2, 0) = homo_vec[6]->get();
            homography(2, 1) = homo_vec[7]->get();
            homography(2, 2) = homo_vec[8]->get();

            homography_valid = true;
        }
    } else {
        throw (std::runtime_error(oat::configNoTableError(config_key, config_file)));
    }
}
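For reference, a config file this routine would accept might look like the following sketch; the [tracker] table name is a stand-in for config_key, while the homography key and its 9-element row-major layout come from the code:

# Hypothetical TOML config; [tracker] stands in for config_key.
[tracker]
homography = [1.0, 0.0, 0.0,
              0.0, 1.0, 0.0,
              0.0, 0.0, 1.0]  # row-major 3x3 matrix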
Example #4
int main(int argc, char **argv)
{
  cout << "Starting" << endl;
  double point[2];
  double * current = new double[9];
  double * best = new double[9];
  double * init = new double[9];
  double ncc;
  double bestncc = -2;
  double first = 0;
  double scale = 1;
  //int position = 0;
  //int direction = 1;
  bool optimize = true;

 cout << "Starting" << endl; 
  vector<PixelLoc> interior;
  for(int i=9; i<=23; ++i){
    for(int j=9; j<=23; ++j){
      PixelLoc point(i, j);
      interior.push_back(point);
    }
  }
cout << "Creating Images";
 Image myimg("test-initial.ppm");
 Image myimgOther("test-final.ppm");
cout << "Images created";
 for(int i=0;i<9;++i){
    init[i] = current[i] = best[i] = 0; 
 }
    init[8] = current[8] = best[8] = 1;
    init[0] = current[0] = best[0] = 1;
    init[4] = current[4] = best[4] = 1;

  cout << "initial homography: " << endl;
  for (int i = 0; i < 9; i++){
    cout << init[i] << " ";
  }

Color red(255,0,0);
Color blue(0,0,100);

Image imgInitial = myimg;
Image src = myimgOther;

for(unsigned int i=0; i<interior.size(); ++i){
   homography(interior[i].x, interior[i].y, current, point);
   PixelLoc loc((int)point[0], (int)point[1]);
   if(inImage(&imgInitial,loc)){
       imgInitial.setPixel(loc,blue);
   }
}

imgInitial.print("initial.ppm");

for(unsigned int i=0; i<interior.size(); ++i){
   if(inImage(&src,interior[i])){
      src.setPixel(interior[i],blue);
   }
}
src.print("src.ppm");

cout << endl;

if(optimize){
  Optimize (scale, first, ncc, bestncc, &interior, init, current, best, &myimg, &myimgOther);
}
 
 cout << "First: " << first << " Best: " << bestncc << endl;
 cout << "homography: "; 
 for(int i=0;i<9;++i){
  cout << current[i] << " ";
 } 
 cout << endl;
Image imgFinal = myimg;

printHomographyTile(&imgFinal,&imgInitial,interior,best);
system("/home/mscs/bin/show src.ppm initial.ppm final.ppm");
  delete[] current;
  delete[] best;
  delete[] init;
  return 0;
}
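The homography(x, y, h, point) helper used in the loops above is not part of the listing. A minimal sketch, assuming h is a row-major 3x3 matrix stored as double[9] (consistent with the 9-element arrays in main):

// Hypothetical sketch: project (x, y) through the row-major 3x3 matrix h
// and write the resulting coordinates into point[0..1].
void homography(double x, double y, const double *h, double *point)
{
  double w = h[6]*x + h[7]*y + h[8];             // projective scale
  point[0] = (h[0]*x + h[1]*y + h[2]) / w;
  point[1] = (h[3]*x + h[4]*y + h[5]) / w;
}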
Example #5
File: main.cpp Project: Barbakas/windage
int main()
{
	char message[100];
	cvNamedWindow("template");
	cvNamedWindow("sampling");
	cvNamedWindow("result");

	// initialize
	sprintf_s(message, IMAGE_SEQ_FILE_NAME, 0);
	IplImage* saveImage = cvLoadImage(message, 0);
	if(GAUSSIAN_BLUR > 0)
		cvSmooth(saveImage, saveImage, CV_GAUSSIAN, GAUSSIAN_BLUR, GAUSSIAN_BLUR);

	int width = saveImage->width;
	int height = saveImage->height;
	int startX = (width-TEMPLATE_WIDTH)/2;
	int startY = (height-TEMPLATE_HEIGHT)/2;
	float q = TEMPLATE_WIDTH * TEMPLATE_HEIGHT;
	
	IplImage* inputImage = NULL;
	IplImage* resultImage = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
	IplImage* templateImage = cvCreateImage(cvSize(TEMPLATE_WIDTH, TEMPLATE_HEIGHT), IPL_DEPTH_8U, 1);
	IplImage* samplingImage = NULL;

	// set template image
	CvRect rect = cvRect(startX, startY, TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
	cvSetImageROI(saveImage, rect);
	cvCopyImage(saveImage, templateImage);
	cvShowImage("template", templateImage);
	cvResetImageROI(saveImage);
	cvReleaseImage(&saveImage);

	// initial homography
	windage::Matrix3 homography(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0);
	homography._13 = startX;
	homography._23 = startY;
	windage::Matrix3 e = homography;

	// Template based Tracking using Inverse Compositional
	windage::InverseCompositional* ic = new windage::InverseCompositional(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
	ic->AttatchTemplateImage(templateImage);
	ic->SetInitialHomography(e);
	ic->Initialize();

	// homography update stack
	std::vector<windage::Matrix3> homographyList;

	cvWaitKey();

	float sumTime = 0.0;
	int sumIter = 0;
	bool processing = true;
	int k = 0;
	while(processing)
	{
		if(k >= IMAGE_SEQ_COUNT)
			break; // all frames processed

		// load image
		if(inputImage) cvReleaseImage(&inputImage);
		sprintf_s(message, IMAGE_SEQ_FILE_NAME, k);
		inputImage = cvLoadImage(message, 0);
		if(GAUSSIAN_BLUR > 0)
			cvSmooth(inputImage, inputImage, CV_GAUSSIAN, GAUSSIAN_BLUR, GAUSSIAN_BLUR);

		cvCvtColor(inputImage, resultImage, CV_GRAY2BGR);

		// processing
		int64 startTime = cvGetTickCount();
		
		float error = 0.0;
		float delta = 1.0;
		int iter = 0;
		homographyList.clear();
		for(iter=0; iter<MAX_ITERATION; iter++)
		{
			error = ic->UpdateHomography(inputImage, &delta);
			homography = ic->GetHomography();
			samplingImage = ic->GetSamplingImage();

			homographyList.push_back(homography);

//			if(delta < HOMOGRAPHY_DELTA)
//				break;
		}
		int64 endTime = cvGetTickCount();
		k++;
		sumIter+= iter;

		// draw result
		int count = homographyList.size();
		for(int i=0; i<count; i++)
 			DrawResult(resultImage, homographyList[i], CV_RGB(((count-i)/(float)count) * 255.0, (i/(float)count) * 255.0, 0), 1);
 		
		double processingTime = (endTime - startTime)/(cvGetTickFrequency() * 1000.0);
		sprintf_s(message, "%03d >> processing time : %.2lf ms (%02d iter), error : %.2lf", k, processingTime, iter, error);
		std::cout << message << std::endl;
		sumTime += processingTime;

		windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 15), message, 0.6);

		// draw image
		cvShowImage("sampling", samplingImage);
		cvShowImage("result", resultImage);

		char ch = cvWaitKey(1);
		switch(ch)
		{
		case ' ':
			{
				char tempch = cvWaitKey(0);
				if(tempch == 's' || tempch == 'S')
				{
					cvSaveImage("ICAlgorithm.jpg", resultImage);
				}
			}
			break;
		case 'i':
		case 'I':
			k++;
			break;
		case 'o':
		case 'O':
			k-=5;
			break;
		case 'q':
		case 'Q':
			processing = false;
			break;
		}

	}

	std::cout << "average iteration : " << sumIter/(float)IMAGE_SEQ_COUNT << std::endl;
	std::cout << "average processing ime : " << sumTime/(float)IMAGE_SEQ_COUNT << " ms" << std::endl;

	if(inputImage) cvReleaseImage(&inputImage);
	cvReleaseImage(&templateImage);
	cvReleaseImage(&resultImage);
	delete ic;
	cvDestroyAllWindows();
	return 0;
}
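DrawResult() is not shown in this example. A plausible sketch, assuming windage::Matrix3 exposes row-major elements _11.._33 (the code above already uses _13 and _23); the body itself is an assumption:

// Hypothetical sketch: draw the template rectangle projected through h.
void DrawResult(IplImage* image, windage::Matrix3& h, CvScalar color, int thickness)
{
	double xs[4] = {0.0, TEMPLATE_WIDTH, TEMPLATE_WIDTH, 0.0};
	double ys[4] = {0.0, 0.0, TEMPLATE_HEIGHT, TEMPLATE_HEIGHT};
	CvPoint pts[4];
	for(int i=0; i<4; i++)
	{
		double w = h._31*xs[i] + h._32*ys[i] + h._33;
		pts[i] = cvPoint((int)((h._11*xs[i] + h._12*ys[i] + h._13)/w),
		                 (int)((h._21*xs[i] + h._22*ys[i] + h._23)/w));
	}
	for(int i=0; i<4; i++)
		cvLine(image, pts[i], pts[(i+1)%4], color, thickness);
}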
Example #6
File: main.cpp Project: Barbakas/windage
int main()
{
	char message[100];
	cvNamedWindow("template");
	cvNamedWindow("sampling");
	cvNamedWindow("result");

	// initialize
	int width = WIDTH;
	int height = HEIGHT;
	int startX = (width-TEMPLATE_WIDTH)/2;
	int startY = (height-TEMPLATE_HEIGHT)/2;
	
	IplImage* inputImage = NULL;
	IplImage* grayImage = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	IplImage* resultImage = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
	IplImage* templateImage = cvCreateImage(cvSize(TEMPLATE_WIDTH, TEMPLATE_HEIGHT), IPL_DEPTH_8U, 1);
	IplImage* samplingImage = NULL;

	// initial template & homography
	CvRect rect = cvRect(startX, startY, TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
	windage::Matrix3 homography(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0);
	homography._13 = startX;
	homography._23 = startY;
	windage::Matrix3 e = homography;

	// Template based Tracking using Inverse Compositional
#if USE_IC
	windage::InverseCompositional* tracker = new windage::InverseCompositional(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
#if USE_ESM
	windage::HomographyESM* tracker = new windage::HomographyESM(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
	tracker->SetInitialHomography(e);

	// homography update stack
	std::vector<windage::Matrix3> homographyList;

	// camera
	CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);

	bool isTrained = false;
	bool processing = true;
	while(processing)
	{
		inputImage = cvQueryFrame(capture); // grab and retrieve one frame
		cvResize(inputImage, resultImage);
		cvCvtColor(resultImage, grayImage, CV_BGR2GRAY);
		
		if(GAUSSIAN_BLUR > 0)
			cvSmooth(grayImage, grayImage, CV_GAUSSIAN, GAUSSIAN_BLUR, GAUSSIAN_BLUR);

		// processing
		int64 startTime = cvGetTickCount();
		
		float error = 0.0;
		float delta = 1.0;
		int iter = 0;
		homographyList.clear();
		for(iter=0; iter<MAX_ITERATION; iter++)
		{
			error = tracker->UpdateHomography(grayImage, &delta);
			homography = tracker->GetHomography();
			homographyList.push_back(homography);

//			if(delta < HOMOGRAPHY_DELTA)
//				break;
		}
		int64 endTime = cvGetTickCount();
		samplingImage = tracker->GetSamplingImage();

		// draw result
		int count = homographyList.size();
		for(int i=0; i<count; i++)
 			DrawResult(resultImage, homographyList[i], CV_RGB(((count-i)/(double)count) * 255.0, (i/(double)count) * 255.0, 0), 1);
 		
		double processingTime = (endTime - startTime)/(cvGetTickFrequency() * 1000.0);
		sprintf_s(message, "processing time : %.2lf ms (%02d iter), error : %.2lf", processingTime, iter, error);
		std::cout << message << std::endl;

#if USE_IC
		windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 15), "Inverse Compositional", 0.6);
#endif
#if USE_ESM
		windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 15), "Efficient Second-order Minimization", 0.6);
#endif
		windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 35), message, 0.6);

		// draw image
		cvShowImage("sampling", samplingImage);
		cvShowImage("result", resultImage);

		char ch = cvWaitKey(1);
		switch(ch)
		{
		case ' ':
			cvSetImageROI(grayImage, rect);
			cvCopyImage(grayImage, templateImage);
			cvShowImage("template", templateImage);
			cvResetImageROI(grayImage);

			tracker->AttatchTemplateImage(templateImage);
			tracker->SetInitialHomography(e);
			tracker->Initialize();
			break;
		case 'r':
		case 'R':
			delete tracker;
			tracker = NULL;
#if USE_IC
			tracker = new windage::InverseCompositional(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
#if USE_ESM
			tracker = new windage::HomographyESM(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
			tracker->SetInitialHomography(e);
			break;
		case 'q':
		case 'Q':
			processing = false;
			break;
		}

	}

	cvReleaseCapture(&capture);

	cvReleaseImage(&grayImage);
	cvReleaseImage(&resultImage);
	cvReleaseImage(&templateImage);
	delete tracker;
	cvDestroyAllWindows();
	return 0;
}
Example #7
    void callback(const sensor_msgs::ImageConstPtr& msg)
    {
        if (image_0_ == NULL)
        {
            // Take first image:
            try
            {
                image_0_ = cv_bridge::toCvCopy(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take first image: " << e.what());
                return;
            }

            ROS_INFO("First image taken");

            // Detect keypoints:
            detector_->detect(image_0_->image, keypoints_0_);
            ROS_INFO_STREAM(keypoints_0_.size() << " points found.");

            // Extract keypoints' descriptors:
            extractor_->compute(image_0_->image, keypoints_0_, descriptors_0_);
        }
        else
        {
            // Take second image:
            try
            {
                image_1_ = cv_bridge::toCvShare(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take image: " << e.what());
                return;
            }

            // Detect keypoints:
            std::vector<cv::KeyPoint> keypoints_1;
            detector_->detect(image_1_->image, keypoints_1);
            ROS_INFO_STREAM(keypoints_1.size() << " points found on the new image.");

            // Extract keypoints' descriptors:
            cv::Mat descriptors_1;
            extractor_->compute(image_1_->image, keypoints_1, descriptors_1);

            // Compute matches:
            std::vector<cv::DMatch> matches;
            match(descriptors_0_, descriptors_1, matches);

            // Compute homography:
            cv::Mat H;
            homography(keypoints_0_, keypoints_1, matches, H);

            // Draw matches:
            const int s = std::max(image_0_->image.rows, image_0_->image.cols);
            cv::Size size(s, s);
            cv::Mat draw_image;
            warped_image_ = boost::make_shared<cv_bridge::CvImage>(
                    image_0_->header, image_0_->encoding,
                    cv::Mat(size, image_0_->image.type()));
            if (!H.empty()) // filter outliers
            {
                std::vector<char> matchesMask(matches.size(), 0);

                const size_t N = matches.size();
                std::vector<int> queryIdxs(N), trainIdxs(N);
                for (size_t i = 0; i < N; ++i)
                {
                    queryIdxs[i] = matches[i].queryIdx;
                    trainIdxs[i] = matches[i].trainIdx;
                }

                std::vector<cv::Point2f> points1, points2;
                cv::KeyPoint::convert(keypoints_0_, points1, queryIdxs);
                cv::KeyPoint::convert(keypoints_1, points2, trainIdxs);

                cv::Mat points1t;
                cv::perspectiveTransform(cv::Mat(points1), points1t, H);

                double maxInlierDist = threshold_ < 0 ? 3 : threshold_;
                for (size_t i1 = 0; i1 < points1.size(); ++i1)
                {
                    if (cv::norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
                        matchesMask[i1] = 1;
                }
                // draw inliers
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

                // draw outliers
                for (size_t i1 = 0; i1 < matchesMask.size(); ++i1)
                    matchesMask[i1] = !matchesMask[i1];
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 0, 255), cv::Scalar(255, 0, 0),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_OVER_OUTIMG | cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

                ROS_INFO_STREAM("Number of inliers: " << cv::countNonZero(matchesMask));

                // Warp the new image using the homography,
                // so we should get something similar to the original image:
                warpPerspective(
                        image_1_->image, warped_image_->image, H, size,
                        cv::INTER_LINEAR | cv::WARP_INVERSE_MAP);

                // Print the homography found:
                ROS_INFO_STREAM("Homography = " << H);
            }
            else
            {
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image);

                image_1_->image.copyTo(warped_image_->image);

                ROS_WARN_STREAM("No homography transformation found!");
            }

            // Publish warped image (using homography):
            warped_image_->header = image_1_->header;
            pub_.publish(warped_image_->toImageMsg());

            // Show images:
            cv::imshow("correspondences", draw_image);
            cv::imshow("homography", warped_image_->image);
            cv::waitKey(3);
        }
    }
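The homography() helper called in this callback is not included in the snippet. A minimal sketch using OpenCV's RANSAC estimator (the actual implementation may differ):

// Hypothetical sketch: estimate H from the matched keypoints with RANSAC.
void homography(const std::vector<cv::KeyPoint>& keypoints_0,
                const std::vector<cv::KeyPoint>& keypoints_1,
                const std::vector<cv::DMatch>& matches, cv::Mat& H)
{
    if (matches.size() < 4)
        return; // cv::findHomography needs at least 4 correspondences
    std::vector<cv::Point2f> points_0, points_1;
    for (const cv::DMatch& m : matches)
    {
        points_0.push_back(keypoints_0[m.queryIdx].pt);
        points_1.push_back(keypoints_1[m.trainIdx].pt);
    }
    H = cv::findHomography(points_0, points_1, cv::RANSAC, 3.0);
}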
Example #8
double controller::start(QStringList arguments)
{
    //start timer
    double t = (double)cv::getTickCount();

    //parse arguments
    if(arguments.size() < 2)
    {
        std::cout << "No parameter given. Type -help for a list of possible functions." << std::endl;
    }
    else if(arguments[1]=="-find-homography")
    {
        try
        {
            QString under = arguments[2];
            QString over = arguments[3];
            QString csv_path = arguments[4];
            QString method = arguments[5];
            double feature_t = arguments[6].toDouble();
            double nndr = arguments[7].toDouble();
            double sift_gamma = arguments[8].toDouble();
            double brightness_offset = arguments[9].toDouble();
            int iso = arguments[10].toInt();
            matcher::find_Homography(under, over, csv_path, sift_gamma, iso, feature_t, nndr, method, brightness_offset);
        }
        catch(...)
        {
            std::cout << "Parameter of '-find-homography' unreadable. Type -help for a list of possible parameters."<< std::endl;
        }
    }
    else if(arguments[1]=="-merge")
    {
        try
        {
            QString under = arguments[2];
            QString over = arguments[3];
            cv::Mat homography(3, 3, CV_64FC1); // 3x3 matrix of doubles (type 6 == CV_64FC1)
            for (int i = 0; i < 3; i++)
            {
                for (int j = 0; j < 3; j++)
                {
                    homography.at<double>(i,j) = arguments[(i*3)+j+4].toDouble();
                }
            }
            QString out = arguments[13];
            double brightness_offset = arguments[14].toDouble();
            int iso = arguments[15].toInt();
            double lower = arguments[16].toDouble();
            double upper = arguments[17].toDouble();
            Renderer::render(under,over,upper,lower,homography,out,iso,brightness_offset,true);
        }
        catch(...)
        {
            std::cout << "Parameter of '-merge' unreadable. Type -help for a list of possible parameters."<< std::endl;
        }
    }
    else if(arguments[1]=="-transform")
    {
        try
        {
            QString image = arguments[2];
            cv::Mat homography(3, 3, CV_64FC1); // 3x3 matrix of doubles (type 6 == CV_64FC1)
            for (int i = 0; i < 3; i++)
            {
                for (int j = 0; j < 3; j++)
                {
                    homography.at<double>(i,j) = arguments[(i*3)+j+3].toDouble();
                }
            }
            QString out = arguments[12];
            int iso = arguments[13].toInt();
            Renderer::render(image,"0",0,0,homography,out,iso,0,false);
        }
        catch(...)
        {
            std::cout << "Parameter of '-transform' unreadable. Type -help for a list of possible parameters."<< std::endl;
        }
    }
    else if(arguments[1]=="-help" || arguments[1]=="-h")
    {
        std::cout << "HDR Merger Help:" << std::endl;
        std::cout << std::endl;
        std::cout << "Parameter:" << std::endl;
        std::cout << std::endl;
        std::cout << "-find-homography" << std::endl;
        std::cout << "  Image_Input_Lowlight_Preserving_Path" << std::endl;
        std::cout << "  Image_Input_Highlight_Preserving_Path" << std::endl;
        std::cout << "  CSV_Output_Path" << std::endl;
        std::cout << "  Method (currently only 'sift')" << std::endl;
        std::cout << "  Params(feat_tres, neares_neighbour_distance_ratio, gamma, brightness_multikator_in_lin_light , ISO)" << std::endl;
        std::cout << "  (All seperated by a space.)" << std::endl;
        std::cout << std::endl;
        std::cout << "-merge" << std::endl;
        std::cout << "  Image_Input_Lowlight_Preserving_Path" << std::endl;
        std::cout << "  Image_Input_Highlight_Preserving_Path" << std::endl;
        std::cout << "  Homography Matrix (each element seperated with space)" << std::endl;
        std::cout << "  Merged_Image_Output_path" << std::endl;
        std::cout << "  brightness_multiplikator_in_lin_light" << std::endl;
        std::cout << "  ISO (only '800' and ‘1600‘ supported)" << std::endl;
        std::cout << "  LowerBlendTreshold" << std::endl;
        std::cout << "  UpperBlendTreshold" << std::endl;
        std::cout << std::endl;
        std::cout << "-transform" << std::endl;
        std::cout << "  Image_Input_Path" << std::endl;
        std::cout << "  Homography Matrix (each element seperated with space)" << std::endl;
        std::cout << "  Transformed_Image_Output_Path" << std::endl;
        std::cout << "  ISO (only '800' and ‘1600‘ supported)" << std::endl;
    }
    else
    {
        std::cout << "Parameter unknown. Type -help for a list of possible functions."<< std::endl;
    }

    // elapsed time in seconds, truncated to two decimal places
    double elapsed = ((double)cv::getTickCount() - t) / cv::getTickFrequency();
    double timer = (int)(elapsed * 100.0) / 100.0;
    return timer;
}
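A sample invocation matching the argument layout parsed above, assuming arguments[0] is the program name and that the binary is called hdrmerger (all file names and values are placeholders):

hdrmerger -find-homography under.png over.png homography.csv sift 0.04 0.8 2.2 1.0 800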