Example #1
0
/**
 * This demonstrates homomorphic operations being done
 * on images. An input image is encrypted pixelwise and
 * homomorphically. Then a homomorphic transformation 
 * is done on it. In this case we convert the RGB values 
 * of the pixels of the image to HSV (Hue, Saturation, Value).
 * Then we rotate the hue by a constant amount, morphing
 * the color of the image. The result is then decrypted
 * and displayed next to the original.
 *
 * Due to the constraints of homomorphic computation the
 * image is scaled down to be of size 100 by 100 pixels.
 * Then we perform batch homomorphic computation on a 
 * vector of pixels of size ~10,000.
 */
int main(int argc, char * argv[]) {
  if (argc < 2) {
    std::cerr << "Usage: " << argv[0] << " ImageFile" << std::endl;
    return 1;
  }

  // Our image input
  cimg_library::CImg<unsigned char> img(argv[1]);

  clock_t start, end;

  start = clock();

  // Generate parameters for the YASHE protocol
  // and create environment. The parameters are precomputed and
  // read from disk; the commented block below shows how they
  // were originally generated.
  YASHE SHE = YASHE::readFromFile("resources/8BitFHE");
  //long t = 257;
  //NTL::ZZ q = NTL::GenPrime_ZZ(400);
  //long d = 22016; // 2^9*43 - 5376 factors
  //long sigma = 8;
  //NTL::ZZ w = NTL::power2_ZZ(70);
  //YASHE SHE(t,q,d,sigma,w);

  end = clock();
  std::cout << "Reading parameters completed in "
            << double(end - start)/CLOCKS_PER_SEC
            << " seconds"
            << std::endl;

  std::cout << SHE.getNumFactors() << " factors." << std::endl;

  // Resize the image so we can pack it into a single
  // cipher text: the pixel count must not exceed the number
  // of plaintext slots (factors).
  if (img.width() * img.height() > SHE.getNumFactors()) {
    double scalingFactor = SHE.getNumFactors()/double(img.width() * img.height());
    scalingFactor = sqrt(scalingFactor);
    long newWidth = img.width() * scalingFactor;
    long newHeight = img.height() * scalingFactor;
    img.resize(newWidth,newHeight, 1, 3, 5);
  }

  // Define a color space of 256 colors
  cimg_library::CImg<unsigned char> colorMap =
    cimg_library::CImg<unsigned char>::default_LUT256();

  // Convert the image into a list of integers;
  // each integer represents a pixel defined
  // by the color mapping above.
  std::vector<long> message;
  ImageFunctions::imageToLongs(message, img, colorMap);

  // In order for the output to reflect the
  // input we change img to be exactly the
  // image we are encrypting - quantized
  // to 256 colors.
  ImageFunctions::longsToImage(message, img, colorMap);

  // Resize the message so that it fills the entire
  // message space (the end of it will be junk)
  message.resize(SHE.getNumFactors());

  // Define a function on pixels.
  // This function takes a pixel value (0 - 255),
  // converts it to HSV, rotates the hue by a constant
  // amount and returns the resulting pixel value.
  std::function<long(long)> rotateHue = [colorMap](long input) {
    unsigned char r, g, b;
    double h, s, v;
    ImageFunctions::longToRGB(input, r, g, b, colorMap);
    ImageFunctions::RGBtoHSV(r, g, b, &h, &s, &v);

    // Rotate hue by 75 degrees. Adding 360 before taking fmod
    // keeps the result in [0, 360): fmod of a negative operand
    // yields a negative value, which would be an invalid hue.
    h = fmod(h - 75. + 360., 360.);
    //s = pow(s, 4.);

    ImageFunctions::HSVtoRGB(&r, &g, &b, h, s, v);
    ImageFunctions::RGBToLong(input, r, g, b, colorMap);

    return input;
  };
  // The function is converted into
  // a polynomial of degree t = 257
  std::vector<long> poly = Functions::functionToPoly(rotateHue, 257);

  start = clock();

  // Generate public, secret and evaluation keys
  NTL::ZZ_pX secretKey = SHE.keyGen();

  end = clock();
  std::cout << "Key generation completed in "
            << double(end - start)/CLOCKS_PER_SEC
            << " seconds"
            << std::endl;
  start = clock();

  // encrypt the message
  YASHE_CT ciphertext = SHE.encryptBatch(message);

  end = clock();
  std::cout << "Encryption completed in "
            << double(end - start)/CLOCKS_PER_SEC
            << " seconds"
            << std::endl;
  start = clock();

  // evaluate the polynomial homomorphically on the ciphertext
  YASHE_CT::evalPoly(ciphertext, ciphertext, poly);

  end = clock();
  std::cout << "Polynomial evaluation completed in "
            << double(end - start)/CLOCKS_PER_SEC
            << " seconds"
            << std::endl;
  start = clock();

  // decrypt the message
  std::vector<long> decryption = SHE.decryptBatch(ciphertext, secretKey);

  end = clock();
  std::cout << "Decryption completed in "
            << double(end - start)/CLOCKS_PER_SEC
            << " seconds"
            << std::endl;

  // turn the message back into an image
  cimg_library::CImg<unsigned char> outputImg(img.width(), img.height(), 1, 3);
  ImageFunctions::longsToImage(decryption, outputImg, colorMap);

  // Display the input next to the output!
  (img, outputImg).display("result!",false);

  return 0;
}
Example #2
0
int main(int argc, char *argv[])
{
  /**
   * Print the help when no input was given.
   */
  if (argc < 2) {
    fprintf(stderr,"Usage:""\n%s imageData1 imageData2", argv[0]);
    fprintf(stderr,"   or:""\n%s camera", argv[0]);
    fprintf(stderr, "\n\ngeneral usage further parameters: -option number\n");
    // Note: the documented default now matches the actual default below.
    fprintf(stderr, "-l  -lambda \t\t defines lambda [default=0.2]\n");
    fprintf(stderr, "-oi -outeriterations \t defines outeriterations [default=40]\n");
    fprintf(stderr, "-ii -inneriterations \t defines inneriterations [default=2]\n");
    fprintf(stderr, "-sl -startlevel \t defines startlevel [default=3]\n");
    fprintf(stderr, "-el -endlevel \t\t defines endlevel [default=0]\n");
    fprintf(stderr, "-cd  -computationdevice  defines the computation device [default=0], 0 = CPU, 1 = GPU\n");
    fprintf(stderr, "-w  -width \t\t defines camera capture size [default=320]\n");
    fprintf(stderr,"\n\n");
    return 0;
  }

  /**
   * Standard Parameters
   */
  int start_level = 3;
  int end_level = 0;
  int outer_iterations = 40;
  int inner_iterations = 2;
  float lambda = 0.2f;
  int cameraWidth = 320, cameraHeight = 240;

  int computationDevice = 0;   // 0 = CPU, 1 = GPU

  std::string partype = "";
  std::string parval = "";

  std::string arg1 = argv[1];

  /**
   * Read Parameters from Input. In two-image mode the options start
   * at argv[3] (after the two file names); in camera/video mode at argv[2].
   */
  int parnum;
  if (arg1 != "camera")
    parnum = 3;
  else
    parnum = 2;
  while(parnum < argc) {
    partype = argv[parnum++];
    if(partype == "-l" || partype == "-lambda") {
      lambda = (float)atof(argv[parnum++]);
      fprintf(stderr,"\nSmoothness Weight: %f", lambda);
    }
    else if(partype == "-oi" || partype == "-outeriterations") {
      outer_iterations = atoi(argv[parnum++]);
      fprintf(stderr,"\nOuter Iterations: %i", outer_iterations);
    }
    else if(partype == "-ii" || partype == "-inneriterations") {
      inner_iterations = atoi(argv[parnum++]);
      fprintf(stderr,"\nInner Iterations: %i", inner_iterations);
    }
    else if(partype == "-sl" || partype == "-startlevel") {
      start_level = atoi(argv[parnum++]);
      fprintf(stderr,"\nStart Level: %i", start_level);
    }
    else if(partype == "-el" || partype == "-endlevel") {
      end_level = atoi(argv[parnum++]);
      fprintf(stderr,"\nEnd Level: %i", end_level);
    }
    else if ((partype == "-w" || partype == "-width") && strcmp(argv[1], "camera") == 0) {
      cameraWidth = atoi(argv[parnum++]);
      cameraHeight = (int)(cameraWidth*3/4);   // keep a 4:3 aspect ratio
      fprintf(stderr,"\nCamera Width: %i\nCamera Height: %i", cameraWidth, cameraHeight);
    }
    else if(partype == "-cd"|| partype == "-computationdevice"){
      std::string parval = argv[parnum++];
      if(parval == "0")computationDevice = 0;
      if(parval == "1")computationDevice = 1;
      fprintf(stderr,"\nComputation Device: %s",computationDevice ? "GPU" : "CPU");
    }
  }

  if (arg1 != "camera" && arg1 != "video") {

    //--------------------------------------------------------------------------
    // Two-image mode
    //--------------------------------------------------------------------------
    cv::Mat img1 = cv::imread(argv[1], 0); // 0 = force the image to be grayscale
    cv::Mat img2 = cv::imread(argv[2], 0);

    if (img1.rows != img2.rows || img1.cols != img2.cols) {
      fprintf(stderr,
          "\nError: Image formats are not equal: (%i|%i) vs. (%i|%i)",
          img1.cols,img1.rows,img2.cols,img2.rows);
      return 1;
    }

    fprintf(stderr,"\nInput Image Dimensions: (%i|%i)",img1.cols,img1.rows);

    img1.convertTo(img1,CV_32FC1);
    img2.convertTo(img2,CV_32FC1);

    fprintf(stderr,"\nCreating FlowLib Object");

    FlowLib *flowlib = NULL;
    if(computationDevice == 1)
      flowlib = new FlowLibGpuSOR(img1.cols,img1.rows);
    else if(computationDevice == 0)
      flowlib = new FlowLibCpuSOR(img1.cols,img1.rows);
    if(flowlib == NULL){
      fprintf(stderr,"\nError: Could not create FlowLib Object");
      return 1;
    }

    fprintf(stderr,"\nSetting up Flow");
    flowlib->setLambda(lambda);
    flowlib->setOuterIterations(outer_iterations);
    flowlib->setInnerIterations(inner_iterations);
    flowlib->setStartLevel(start_level);
    flowlib->setEndLevel(end_level);

    flowlib->setInput((float*)img1.data);
    flowlib->setInput((float*)img2.data);

    fprintf(stderr,"\nComputing Flow");

    flowlib->computeFlow();

    // Interleave the u/v flow components into a single buffer
    // (u0,v0,u1,v1,...) as expected by save_flow_file. Iterating
    // backwards lets us reuse u in place without overwriting
    // values that are still needed.
    float *u = new float[img1.cols*img1.rows*2];
    float *v = new float[img1.cols*img1.rows];
    flowlib->getOutput(u,v);
    for(int x=img1.cols*img1.rows-1;x>=0;x--){
      u[2*x] = u[x];
      u[2*x+1] = v[x];
    }
    save_flow_file("output.flo",u, img1.cols, img1.rows);
    delete [] u;
    delete [] v;   // was leaked before

    fprintf(stderr,"\nDisplaying Output");

    cv::Mat outputImg(cv::Size(img1.cols,img1.rows),CV_32FC3);

    flowlib->getOutputBGR((float*)outputImg.data,0.0f,1.0f,0.0f,1.0f);

    cv::imshow("Input Image 1",img1/255.0f);
    cv::imshow("Input Image 2",img2/255.0f);
    cv::imshow("Output Image",outputImg);

    cv::imwrite(computationDevice ? "flowImageGPU.png" : "flowImageCPU.png",outputImg*255.0f);

    cvMoveWindow("Input Image 1", 100, 100);
    cvMoveWindow("Input Image 2", 450, 100);
    cvMoveWindow("Output Image", 100, 400);

    printf("\nPress Esc on the image to exit...\n");
    cv::waitKey(0);

    fprintf(stderr,"\nDeleting Flow Object");
    delete flowlib;
    fprintf(stderr,"\nFlow Object Deleted.");

  } // endif image file

  //----------------------------------------------------------------------------
  // Camera / Video Mode
  //----------------------------------------------------------------------------
  else{

    cv::VideoCapture *capture = 0;
    bool videomode = arg1 == "video";
    if(videomode){
      fprintf(stderr,"\nReading Images from Video %s",argv[2]);
      capture = new cv::VideoCapture(argv[2]);
    }
    else{
      fprintf(stderr,"\nReading Images from Camera");
      capture = new cv::VideoCapture(0);
    }
    if(!capture->isOpened()){
      std::cerr << std::endl << "Could not Open Capture Device";
      delete capture;
      return -1;
    }
    if(!videomode){
      fprintf(stderr,"\nSetting Camera up to (%i|%i)",cameraWidth,cameraHeight);
      capture->set(CV_CAP_PROP_FRAME_WIDTH,cameraWidth);
      capture->set(CV_CAP_PROP_FRAME_HEIGHT,cameraHeight);
    }
    cv::Mat *img1 = new cv::Mat();
    cv::Mat *img2 = new cv::Mat();

    // Grab one frame up-front to learn the capture dimensions.
    (*capture) >> (*img1);
    fprintf(stderr,"\nImage Dimensions: (%i|%i)",img1->cols,img1->rows);

    float *u = new float[img1->cols*img1->rows*2];
    float *v = new float[img1->cols*img1->rows];

    fprintf(stderr,"\nCreating FlowLib Object");
    FlowLib *flowlib = NULL;
    if(computationDevice == 1)
      flowlib = new FlowLibGpuSOR(img1->cols,img1->rows);
    else if(computationDevice == 0)
      flowlib = new FlowLibCpuSOR(img1->cols,img1->rows);
    fprintf(stderr,"\nFlowLib Object created.");

    fprintf(stderr,"\nSetting up Flow");

    flowlib->setLambda(lambda);
    flowlib->setOuterIterations(outer_iterations);
    flowlib->setInnerIterations(inner_iterations);
    flowlib->setStartLevel(start_level);
    flowlib->setEndLevel(end_level);

    fprintf(stderr,"\nFlow Setup Finished");

    cv::namedWindow("Input Image",1);
    cv::namedWindow("Flow",1);

    cv::Mat outputImg(cv::Size(img1->cols,img1->rows),CV_32FC3);

    // Run until any key is pressed.
    while (cv::waitKey(30) < 0
        ){
      (*capture) >> (*img1);
      cv::cvtColor(*img1,*img1,CV_BGR2GRAY);
      img1->convertTo(*img1,CV_32FC1);
      flowlib->setInput((float*)img1->data);
      printf("\n%f fps",1000.0f/flowlib->computeFlow());
      flowlib->getOutputBGR((float*)outputImg.data,0.0f,5.0f,0.0f,1.0f);

      cv::imshow("Input Image",(*img1)/255.0f);
      cv::imshow("Flow",outputImg);
      // Swap the frame buffers so the previous frame stays available.
      cv::Mat *temp = img1; img1 = img2; img2 = temp;
    }
    delete capture;

    delete [] u;
    delete [] v;
    // These were leaked before:
    delete flowlib;
    delete img1;
    delete img2;

  } // endif camera

  fprintf(stderr,"\n\n");
  return 0;
}
// Runs one detection pass over a frame: a Haar cascade proposes candidate
// rectangles inside the region of interest, then an ANN verifies each
// candidate. Verified candidates are appended to verifiedVehicles with the
// given timestamp. The input frame is never modified.
void VehicleDetectionSystem::update(const cv::Mat* a_imageFrame, 
    std::shared_ptr<std::vector<std::shared_ptr<DetectedVehicle>>> verifiedVehicles, 
    double timeStamp)
{
  // Save a copy of the source image so that we don't change the original one
  cv::Mat srcImg(a_imageFrame->size(), a_imageFrame->type());
  a_imageFrame->copyTo(srcImg);

  cv::resize(srcImg, srcImg, cv::Size(m_windowWidth, m_windowHeight), 
      0, 0, cv::INTER_CUBIC);

  // outputImg is the visualization canvas that rectangles are drawn onto.
  cv::Mat outputImg(srcImg.size(), srcImg.type());
  srcImg.copyTo(outputImg);

  // Extract the sub image of the region of interest and run the
  // Haar cascade over it to collect candidate rectangles.
  cv::Mat imageROI = srcImg(m_regionOfInterest);
  m_haarClassifier.detectMultiScale(imageROI, m_potentialVehicles);

  cv::Mat subImage;

  std::cout << "Potential #vehicles: " << m_potentialVehicles.size() << 
      std::endl;

  // Loop-invariant debug settings, hoisted out of the candidate loop.
  const bool saveImages = false;
  const std::string saveImgPath = "tmp_img/";

  for (size_t i = 0; i < m_potentialVehicles.size(); i++) {
    cv::Rect r = m_potentialVehicles[i];
    // Candidate coordinates are relative to the ROI; shift back into
    // full-image coordinates.
    r.y += m_roiTop;

    subImage = cv::Mat(srcImg(r).size(), srcImg(r).type());
    srcImg(r).copyTo(subImage);
    // Second-stage verification: positive ANN response means "vehicle".
    float result = m_ann.doClassification(&subImage);
    std::cout << "ann result: " << result << std::endl;

    if (result > 0) {
      // if this subimage gets verified by the ANN, draw green
      cv::rectangle(outputImg, r, cv::Scalar(0,255,0));
      std::shared_ptr<DetectedVehicle> detectedVehicle(
          new DetectedVehicle(r, timeStamp));
      verifiedVehicles->push_back(detectedVehicle);
      if (saveImages) {
        std::string imgFileName = saveImgPath + "pos" + 
            std::to_string(m_imgCounter) + ".jpg";
        imwrite(imgFileName, subImage);
        m_imgCounter++;
      }
    } else {
      // otherwise, if not verified, draw red (BGR 0,0,255)
      cv::rectangle(outputImg, r, cv::Scalar(0,0,255));
      if (saveImages) {
        std::string imgFileName = saveImgPath + "neg" + 
            std::to_string(m_imgCounter) + ".jpg";
        imwrite(imgFileName, subImage);
        m_imgCounter++;
      }
    }

    subImage.release();
  }

  // Explicit releases (cv::Mat would also clean up via RAII).
  subImage.release();
  imageROI.release();
  outputImg.release();
  srcImg.release();
}
// Harvests false-positive face detections: for every entry in a user-chosen
// land file, runs the face detector on a half-size image and saves each
// detection that overlaps the annotated true face by less than 50% as a
// 32x32 non-face training sample.
// NOTE(review): the original function was missing its closing brace and
// never closed `fin`; both are fixed here.
void FaceFeatureRecognitionApp::GenerateFalsePositives(void)
{
    // Ask the user for the land-file list; referenced images are resolved
    // relative to the directory of that file.
    std::string fileName;
    char filterName[] = "Land Files(*.txt)\0*.txt\0";
    MagicCore::ToolKit::FileOpenDlg(fileName, filterName);
    std::string imgPath = fileName;
    std::string::size_type pos = imgPath.rfind("/");
    if (pos == std::string::npos)
    {
        pos = imgPath.rfind("\\");
    }
    imgPath.erase(pos);
    std::ifstream fin(fileName);
    int dataSize;
    fin >> dataSize;
    const int maxSize = 512;
    char pLine[maxSize];
    fin.getline(pLine, maxSize);  // consume the remainder of the first line
    double maxOverlapRate = 0.5;  // detections overlapping less than this are "false"
    std::string outputPath = "./NonFacefw2/nonFace2_fw_";
    int outputSize = 32;          // saved sample side length in pixels
    int outputId = 0;
    for (int dataId = 0; dataId < dataSize; dataId++)
    {
        DebugLog << "dataId: " << dataId << " dataSize: " << dataSize << std::endl;
        fin.getline(pLine, maxSize);
        std::string landName(pLine);
        landName = imgPath + landName;
        // Derive the image (.jpg) and annotation (.pos) names from the land name.
        std::string imgName = landName;
        std::string posName = landName;
        std::string::size_type pos = imgName.rfind(".");
        imgName.replace(pos, 5, ".jpg");
        pos = posName.rfind(".");
        posName.replace(pos, 5, ".pos");

        // Ground-truth face square: top-left row/col and side length.
        std::ifstream posFin(posName);
        int faceRow, faceCol, faceLen;
        posFin >> faceRow >> faceCol >> faceLen;
        posFin.close();

        // Detect on a half-resolution copy of the image.
        // NOTE(review): imread defaults to 3 channels, so cv::resize will
        // make halfImg 3-channel despite the CV_8UC1 hint; ptr(...)[0]
        // below then reads the first (blue) channel — confirm intended.
        cv::Mat img = cv::imread(imgName);
        cv::Size cvHalfSize(img.cols / 2, img.rows / 2);
        cv::Mat halfImg(cvHalfSize, CV_8UC1);
        cv::resize(img, halfImg, cvHalfSize);

        std::vector<int> faces;
        int detectNum = mpFaceDetection->DetectFace(halfImg, faces);
        for (int detectId = 0; detectId < detectNum; detectId++)
        {
            // faces holds 4 ints per detection: row, col, len, (score).
            int detectBase = detectId * 4;
            int detectRow = faces.at(detectBase);
            int detectCol = faces.at(detectBase + 1);
            int detectLen = faces.at(detectBase + 2);
            if (CalculateOverlapRate(faceRow, faceCol, faceLen, detectRow, detectCol, detectLen) < maxOverlapRate)
            {
                // Copy the detection window pixel by pixel.
                cv::Mat detectImg(detectLen, detectLen, CV_8UC1);
                for (int hid = 0; hid < detectLen; hid++)
                {
                    for (int wid = 0; wid < detectLen; wid++)
                    {
                        detectImg.ptr(hid, wid)[0] = halfImg.ptr(detectRow + hid, detectCol + wid)[0];
                    }
                }
                // Normalize to the output sample size and save.
                cv::Size cvOutputSize(outputSize, outputSize);
                cv::Mat outputImg(cvOutputSize, CV_8UC1);
                cv::resize(detectImg, outputImg, cvOutputSize);
                detectImg.release();
                std::stringstream ss;
                ss << outputPath << outputId << ".jpg";
                std::string outputImgName;
                ss >> outputImgName;
                outputId++;
                cv::imwrite(outputImgName, outputImg);
                outputImg.release();
            }
        }
        halfImg.release();
    }
    fin.close();
}
// Synthesizes non-face training samples by cropping random 40x40 patches
// from (grayscale versions of) face images listed in a user-chosen land
// file, upscaling each patch to 64x64 and writing it to disk.
void FaceFeatureRecognitionApp::GenerateNonFaceFromFace(void)
{
    // Ask the user for the land-file list; referenced images are resolved
    // relative to the directory of that file.
    std::string fileName;
    char filterName[] = "Land Files(*.txt)\0*.txt\0";
    MagicCore::ToolKit::FileOpenDlg(fileName, filterName);
    std::string imgPath = fileName;
    std::string::size_type pos = imgPath.rfind("/");
    if (pos == std::string::npos)
    {
        pos = imgPath.rfind("\\");
    }
    imgPath.erase(pos);
    std::ifstream fin(fileName);
    int dataSize;
    fin >> dataSize;
    int cropSize = 40;    // side length of the random crop
    int outputSize = 64;  // side length of the saved sample
    cv::Size cvOutputSize(outputSize, outputSize);
    int imgNumPerData = 1;
    srand(time(NULL));
    for (int dataId = 0; dataId < dataSize; dataId++)
    {
        std::string imgName;
        fin >> imgName;
        imgName = imgPath + imgName;
        pos = imgName.rfind(".");
        imgName.replace(pos, 5, "_gray.jpg");
        cv::Mat grayImg = cv::imread(imgName);
        int imgH = grayImg.rows;
        int hMax = imgH - cropSize;
        int imgW = grayImg.cols;
        int wMax = imgW - cropSize;
        // Guard against images that failed to load or are smaller than
        // the crop window: rand() % hMax with hMax <= 0 is undefined
        // behavior (division by zero).
        if (hMax <= 0 || wMax <= 0)
        {
            DebugLog << "Skipping (missing or too small): " << imgName << std::endl;
            grayImg.release();
            continue;
        }
        int synImgNum = 0;
        while (synImgNum < imgNumPerData)
        {
            // Random top-left corner of the crop window.
            int sH = rand() % hMax;
            int sW = rand() % wMax;
            cv::Mat cropImg(cropSize, cropSize, CV_8UC1);
            for (int hid = 0; hid < cropSize; hid++)
            {
                for (int wid = 0; wid < cropSize; wid++)
                {
                    cropImg.ptr(hid, wid)[0] = grayImg.ptr(sH + hid, sW + wid)[0];
                }
            }
            // Upscale the crop to the output sample size and save.
            cv::Mat outputImg(cvOutputSize, CV_8UC1);
            cv::resize(cropImg, outputImg, cvOutputSize);
            cropImg.release();
            std::stringstream ss;
            ss << "./nonFaceFromFace/nonFace" << dataId << "_" << synImgNum << "_40.jpg";
            std::string outputName;
            ss >> outputName;
            cv::imwrite(outputName, outputImg);
            outputImg.release();
            synImgNum++;
        }
        grayImg.release();
    }
    fin.close();
    DebugLog << "GenerateNonFaceFromFace Done" << std::endl;
}
Example #6
0
// Converts a single-channel float image into a matrix of patch vectors:
// every interior pixel contributes one row holding its Gaussian-weighted
// (kernelsize x kernelsize) neighborhood. The work is split across
// nThreads worker threads running getvectors_multi_threaded.
cv::Mat image2vectors_single(cv::Mat img,
                             int neighborhoodRadius,
                             int nThreads)
{
    assert(CV_32FC1 == img.type());
    assert(img.isContinuous());

    /* Input image, output image */
    float *I, *V;

    /* Gaussian weighting kernel */
    float *K;

    /* Size of input image. The third dimension stays 1 for a 2D
     * cv::Mat, so the image3D branch below is never taken here. */
    int Isize[3]={1, 1, 1};

    /* Size of vector volume. Padded to 3 entries so the float-copy
     * loop below stays in bounds (the original int Vsize[2] was read
     * at index 2 — an out-of-bounds read). */
    int Vsize[3]={1, 1, 1};

    /* Constants used */
    int kernelratio=3;
    int kernelsize;

    int block[6]={1, 1, 1, 1, 1, 1};
    int block_size[3];

    int image3D;

    /* Loop variable */
    int i;
    float Isize_d[3];
    float Vsize_d[3];
    float par_d[4];
    float block_d[6];

    int Nthreads;

    /* float pointer array to store all needed function variables */
    float ***ThreadArgs;
    float **ThreadArgs1;

    /* Handles to the worker threads */
    #ifdef _WIN32
            HANDLE *ThreadList;
    #else
            pthread_t *ThreadList;
    #endif

            /* ID of Threads */
            float **ThreadID;
    float *ThreadID1;

    /* Check input image dimensions.
     * NOTE(review): width goes into Isize[0] and height into Isize[1];
     * confirm getvectors_multi_threaded expects this ordering. */
    Isize[0]=img.size().width;
    Isize[1]=img.size().height;

    if(Isize[2]>3) { image3D=1; } else { image3D=0; }

    /* Connect input image */
    I = reinterpret_cast<float*>(img.data);

    /* Set Values */
    kernelratio = neighborhoodRadius;
    kernelsize=2*kernelratio+1;

    if(image3D==0) {
        /* Interior region that has a full neighborhood on all sides. */
        block[0]=kernelratio;
        block[1]=kernelratio;
        block[2]=Isize[0]-kernelratio-1;
        block[3]=Isize[1]-kernelratio-1;

        block_size[0]=block[2]-block[0]+1;
        block_size[1]=block[3]-block[1]+1;

        /* One row per interior pixel, one column per kernel sample. */
        Vsize[0]=kernelsize*kernelsize*Isize[2];
        Vsize[1]=block_size[0]*block_size[1];
    }
    else {
        block[0]=kernelratio;
        block[1]=kernelratio;
        block[2]=kernelratio;
        block[3]=Isize[0]-kernelratio-1;
        block[4]=Isize[1]-kernelratio-1;
        block[5]=Isize[2]-kernelratio-1;

        block_size[0]=block[3]-block[0]+1;
        block_size[1]=block[4]-block[1]+1;
        block_size[2]=block[5]-block[2]+1;

        Vsize[0]=kernelsize*kernelsize*kernelsize;
        Vsize[1]=block_size[0]*block_size[1]*block_size[2];
    }

    /* Create output array */
    cv::Mat outputImg(Vsize[1], Vsize[0], CV_32FC1);
    V = reinterpret_cast<float*>(outputImg.data);

    /* Square roots of the Gaussian weights, so that weighted dot
     * products between vectors carry the full Gaussian weight. */
    if(image3D==0) {
        K = gaussian_kernel_2D(kernelratio);
        for (i=0; i<(kernelsize*kernelsize); i++)  { K[i]=(float)sqrt(K[i]); }
    }
    else {
        K = gaussian_kernel_3D(kernelratio);
        for (i=0; i<(kernelsize*kernelsize*kernelsize); i++)  { K[i]=(float)sqrt(K[i]); }
    }

    Nthreads = nThreads;
    float nThreadsF = static_cast<float>(nThreads);

    /* Reserve room for handles of threads in ThreadList  */
    #ifdef _WIN32
            ThreadList = (HANDLE*)malloc(Nthreads* sizeof( HANDLE ));
    #else
            ThreadList = (pthread_t*)malloc(Nthreads* sizeof( pthread_t ));
    #endif

    ThreadID = (float **)malloc( Nthreads* sizeof(float *) );
    ThreadArgs = (float ***)malloc( Nthreads* sizeof(float **) );

    /* The thread entry point takes only float arrays, so all sizes
     * and parameters are marshalled through float buffers. */
    for(i=0; i<3; i++) {
        Isize_d[i]=(float)Isize[i];
        Vsize_d[i]=(float)Vsize[i];
        block_d[i] =(float)block[i];
        block_d[i+3] =(float)block[i+3];
    }
    par_d[0] =(float)kernelratio;
    par_d[1] =(float)image3D;

    for (i=0; i<Nthreads; i++) {
        /*  Make Thread ID  */
        ThreadID1= (float *)malloc( 1* sizeof(float) );
        ThreadID1[0]=(float)i;
        ThreadID[i]=ThreadID1;

        /*  Make Thread Structure  */
        ThreadArgs1 = (float **)malloc( 9* sizeof( float * ) );

        ThreadArgs1[0]=I;
        ThreadArgs1[1]=Isize_d;
        ThreadArgs1[2]=V;
        ThreadArgs1[3]=Vsize_d;
        ThreadArgs1[4]=par_d;
        ThreadArgs1[5]=block_d;
        ThreadArgs1[6]=K;
        ThreadArgs1[7]=ThreadID[i];
        ThreadArgs1[8]=&nThreadsF;

        /* Start a Thread  */
        ThreadArgs[i]=ThreadArgs1;

        #ifdef _WIN32
                ThreadList[i] = (HANDLE)_beginthreadex( NULL, 0, reinterpret_cast<unsigned int (__stdcall *)(void *)>(&getvectors_multi_threaded), ThreadArgs[i] , 0, NULL );
        #else
                pthread_create((pthread_t*)&ThreadList[i], NULL, (void *) &getvectors_multi_threaded, ThreadArgs[i]);
        #endif
    }

    /* Wait for every worker to finish before touching its results. */
    #ifdef _WIN32
        for (i=0; i<Nthreads; i++) { WaitForSingleObject(ThreadList[i], INFINITE); }
        for (i=0; i<Nthreads; i++) { CloseHandle( ThreadList[i] ); }
    #else
        for (i=0; i<Nthreads; i++) { pthread_join(ThreadList[i], NULL); }
    #endif

    for (i=0; i<Nthreads; i++) {
        free(ThreadArgs[i]);
        free(ThreadID[i]);
    }

    free(ThreadArgs);
    free(ThreadID );
    free(ThreadList);
    /* NOTE(review): assumes gaussian_kernel_2D/3D allocate with malloc. */
    free(K);

    return outputImg;
}