Example #1
vector<vector<float> > Encoder::extractMultiLBP(Mat img, Mat landmarks, int level){
	//Mat img = imread(img_path, CV_LOAD_IMAGE_COLOR);
	VlLbp * lbp = vl_lbp_new (VlLbpUniform, VL_TRUE) ;
	if (lbp == NULL) {
		cout<<"failed to initialize LBP detector"<<endl;
		return vector<vector<float> >();
	}
	int dimensionx = patchSize / cellSize;
	int dimensiony = patchSize / cellSize;
	int dimensionc = vl_lbp_get_dimension(lbp) ;
	vector<vector<float> > ret;
	float* code = new float[dimensionx*dimensiony*dimensionc];
	//cout<<"dim: "<<dimensionx<<" "<<dimensiony<<" "<<dimensionc <<endl;
	for (int l = 0; l < level; l++){
		int tmpcellSize = cellSize - l;
		int tmppatchSize = tmpcellSize*dimensionx;
		
		for (int i = 0; i < landmarks.cols; i++){
			if (landmarks.at<float>(0, i) > patchSize/2 && landmarks.at<float>(1, i) > patchSize/2 && landmarks.at<float>(0, i) + patchSize/2 < img.cols && landmarks.at<float>(1, i) + patchSize/2 < img.rows){
				Mat roi(img, Rect(landmarks.at<float>(0, i) - tmppatchSize/2 , landmarks.at<float>(1, i) - tmppatchSize/2, tmppatchSize, tmppatchSize));
				vector<float> data;
				for (int j = 0; j < roi.cols; j++){
					for (int k = 0; k < roi.rows; k++){
						data.push_back((float)roi.at<unsigned char>(k, j)/255);
					}
				}

				//float* features = new float[dimensionx * dimensiony * dimensionc];
				//cout<<"code size: x: "<<dimensionx<<" y: "<<dimensiony<<" c: "<<dimensionc<<endl;
				
				for (int j = 0; j < dimensionx*dimensiony*dimensionc; j++){
					code[j] = 0;
				}
				vl_lbp_process(lbp, code, &data[0], tmppatchSize, tmppatchSize, tmpcellSize);
				vector<float> lbpCode;
				for (int j = 0; j < dimensionx*dimensiony*dimensionc; j++){
					//cout<<code[j]<<" ";
					lbpCode.push_back(code[j]);
				}
				ret.push_back(lbpCode);
				//cout<<"feature "<<i/2<<" size: "<<ret.size()<<endl;
			}
			else{
				cout<<"Patch out of bound: "<<landmarks.at<float>(0, i)<<" "<<landmarks.at<float>(1, i)<<endl;
				exit(1);
			}
		}
	}
	delete[] code;
	vl_lbp_delete(lbp);
	return ret;	
}
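A minimal usage sketch for extractMultiLBP, under the assumptions that Encoder is default-constructible with patchSize and cellSize already set, that VLFeat is linked, and that landmarks is a 2xN CV_32F matrix with x coordinates in row 0 and y coordinates in row 1 (as the accesses above suggest); the image should be single-channel to match the at<unsigned char> reads.

// Hypothetical caller; the Encoder setup details are assumptions, not part of the shown code.
Mat face = imread("face.png", CV_LOAD_IMAGE_GRAYSCALE);
Mat landmarks = (Mat_<float>(2, 3) <<
	120.f, 200.f, 160.f,   // x coordinates (row 0)
	 90.f,  90.f, 150.f);  // y coordinates (row 1)
Encoder encoder;           // assumed to have patchSize/cellSize configured
vector<vector<float> > descriptors = encoder.extractMultiLBP(face, landmarks, 2);
// one uniform-LBP cell-histogram vector per landmark per pyramid level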
template<typename Iter> double calcError(const cv::Mat& samples, const cv::Mat& model, Iter begin, Iter end){
	double error = 0;
	size_t count = 0;
	
	cv::Rect roi(0,0,samples.cols,1);
	for(Iter it = begin; it != end; it++){
		size_t idx = *it;
		roi.y = idx;
		error += cv::norm(model, cv::Mat(samples,roi));
		count++;
	}
	return error / count;
}
Example #3
static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
{
    UMat src = _src.getUMat(), dst = _dst.getUMat();

    for (int y = 0; y < ny; ++y)
        for (int x = 0; x < nx; ++x)
        {
            Rect roi(x * src.cols, y * src.rows, src.cols, src.rows);
            UMat hdr(dst, roi);
            src.copyTo(hdr);
        }
    return true;
}
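This appears to be the OpenCL branch of cv::repeat; for ordinary cv::Mat inputs the same tiling can be requested directly, e.g.:

cv::Mat tile = cv::imread("tile.png");
cv::Mat tiled;
cv::repeat(tile, 2, 3, tiled);   // tiled holds a 2 (ny) x 3 (nx) grid of copies of tile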
template<typename Iter> cv::Mat fitModel(const cv::Mat& samples, Iter begin, Iter end){
	cv::Mat model(cv::Size(samples.cols,1),samples.type(),cv::Scalar(0));
	
	size_t count = 0;
	cv::Rect roi(0,0,samples.cols,1);
	for(Iter it = begin; it != end; it++){
		size_t idx = *it;
		roi.y = idx;
		model += cv::Mat(samples,roi);
		count++;
	}
	return model / count;
}
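A small sketch tying fitModel to the calcError helper shown earlier: fit the mean row over an index subset, then score how well that model represents the same rows. The sample values and indices are arbitrary illustrations.

cv::Mat samples(10, 3, CV_32F);
cv::randu(samples, cv::Scalar(0), cv::Scalar(1));                         // fake data for illustration
size_t idxArr[] = {0, 2, 5, 7};
std::vector<size_t> subset(idxArr, idxArr + 4);
cv::Mat model = fitModel(samples, subset.begin(), subset.end());          // 1 x 3 mean row
double error  = calcError(samples, model, subset.begin(), subset.end());  // mean L2 distance to that row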
Example #5
void SAHer::ComputeSAH(const cv::Mat &sal) {
    HalfToneInit();
    //float e_old = Objective();
    bool use_sal = (sal.cols && sal.rows);
    int block_size = 2;
    float temperature = .2f;
    float AnnealFactor = .8f;
    do {
        for (int block_i = 0; block_i < h_; block_i += block_size) for (int block_j = 0; block_j < w_; block_j += block_size) {

            std::vector<std::pair<int,int> > b_indices, w_indices;
            for (int ii = 0; ii < block_size && block_i + ii < h_; ii++) {
                for (int jj = 0; jj < block_size && block_j + jj < w_; jj++) {
                    int i = block_i + ii, j = block_j + jj;
                    if (halftone_image_.at<float>(i,j) > 0) w_indices.push_back(std::pair<int,int>(i, j));
                    else b_indices.push_back(std::pair<int,int>(i, j));
                }
            }

            if (b_indices.empty() || w_indices.empty()) continue;
            // else try block_size x block_size times of swap.
            cv::Rect roi(block_j, block_i, std::min(block_size,w_ - block_j ), std::min(block_size, h_ - block_i));
            float e_old = Objective(roi);

            int exchange_times = use_sal ? round(block_size * block_size * cv::mean(sal(roi))[0]) : block_size * block_size;
            for (int k = 0; k < exchange_times; k++){
                int rand1 = rand() % b_indices.size(), rand2 = rand() % w_indices.size();
                std::pair<int,int> idx1 = b_indices[rand1], idx2 = w_indices[rand2];
                halftone_image_.at<float>(idx1.first, idx1.second) = 1;
                halftone_image_.at<float>(idx2.first, idx2.second) = 0;
                float e_new = Objective(roi);
                float delta_e = e_new - e_old;
                if ( delta_e < 0.f || rand_float() < exp( (- delta_e / temperature) * w_ * h_ ) ) {
                    // accept
                    e_old = e_new;
                    b_indices[rand1] = idx2;
                    w_indices[rand2] = idx1;
                } else {
                    // reject and undo swap
                    halftone_image_.at<float>(idx1.first, idx1.second) = 0;
                    halftone_image_.at<float>(idx2.first, idx2.second) = 1;
                }
            }

        }
        temperature *= AnnealFactor;
    } while (temperature > 0.15f);


    return;
}
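The inner swap loop above applies a Metropolis acceptance rule: a swap that lowers the objective is always kept, otherwise it is kept with probability exp(-delta_e / temperature) (the code additionally scales the exponent by w_*h_). A standalone sketch of that rule, using std::rand in place of the member rand_float and omitting the area scaling:

#include <cmath>
#include <cstdlib>

// Accept a proposed swap with energy change delta_e at the given temperature.
static bool acceptSwap(float delta_e, float temperature) {
    if (delta_e < 0.f) return true;                         // downhill moves are always accepted
    float u = static_cast<float>(std::rand()) / RAND_MAX;   // uniform sample in [0, 1]
    return u < std::exp(-delta_e / temperature);            // uphill moves accepted with Boltzmann probability
}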
		// TODO: test for all types once instantiated
		void testConvolutionFullPixelOneBand() {
			std::cout << std::endl << "GPU CONV VERIFICATION TEST" << std::endl;
			ssize_t filterRadius = 1;
			cv::Size2i roi(5,5);
			cv::Size2i dSize(roi.width + filterRadius * 2,roi.height + filterRadius * 2);
			vector<short> data;
			data.resize(dSize.area());

			for(int i = 0; i < dSize.area(); ++i) {
				data[i] = i;
			}

			cvt::cvTile<short> inTile(data.data(), dSize, 1);
			cvt::cvTile<short>* outTile;

			cv::Mat weightsMat = cv::Mat::zeros(3,3,CV_16SC1);   // signed 16-bit to match the at<short>() writes below
			for(int i = 0; i < 3; ++i) {
				for(int j = 0; j < 3; ++j) {
					weightsMat.at<short>(i,j) = 2;
				}
			}
			
			inTile.setROI(cv::Rect(filterRadius, filterRadius, roi.width, roi.height));	
			cvt::gpu::GpuConvolution<short,1,short,1,short> conv(0, roi.width, roi.height,
									    filterRadius, weightsMat);

			TS_ASSERT_EQUALS(cvt::Ok, conv.initializeDevice(cvt::gpu::SQUARE));
			
			conv(inTile, (const cvt::cvTile<short>**)&outTile);
			TS_ASSERT_EQUALS(0, (outTile == NULL));

			//TODO Can we remove this?
			//cv::Mat& a = inTile[0];
			cv::Mat& b = (*outTile)[0];

			short expected[] = {32, 54, 66, 78, 
					  90, 102, 72, 90, 
					  144, 162, 180, 198, 
					  216, 150, 174, 270, 
					  288, 306, 324, 342,
					  234, 258, 396, 414, 432};

			int k = 0;
			for(int i = 0; i < roi.width; ++i) {
				for(int j = 0; j < roi.height; ++j) {
					//std::cout << "b[" << i << "," << j << "] = " << b.at<short>(i,j) << std::endl;
					TS_ASSERT_EQUALS(b.at<short>(i,j), expected[k]);
					k++;
				}
			}
	}
//--------------------------------------------------------------
void testApp::setup(){
	
    ofEnableSmoothing();
	ofBackground(50);

	ofRectangle roi(0, 512, 2048, 1024);
	ofSetWindowShape(roi.width, max(roi.height, 256.0f));

	grabber.open();
	grabber.setROI(roi);
	grabber.setExposure(1291);
	grabber.startCapture(TriggerMode::Trigger_GPIO1, TriggerSignalType::TriggerSignal_RisingEdge);
	recorder.setGrabber(grabber);

	this->toggleRecord = false;
	this->bangClear = false;
	this->bangClearBefore = false;
	this->bangClearAfter = false;
	this->bangSavePipets = false;
	this->toggleSave = false;
	this->toggleProgress = false;

	gui.setHeight(400);
	
	gui.addLabel("ofxMachineVision", OFX_UI_FONT_LARGE);
	gui.addLabel("Camcorder example", OFX_UI_FONT_MEDIUM);
	
	gui.addSpacer();

	gui.addLabel("Device");
	//gui.addButton("Open camera", &this->bangOpen);
	this->guiDeviceStateLabel = gui.addLabel("Device state", "...", OFX_UI_FONT_SMALL);
	
	gui.addSpacer();

	gui.addToggle("Record", &this->toggleRecord);
	this->guiRecordStateLabel = gui.addLabel("Recorder state", "...", OFX_UI_FONT_SMALL);
	this->guiRecordCountLabel = gui.addLabel("Frame count", "Empty", OFX_UI_FONT_SMALL);
	gui.addButton("Clear frames", &this->bangClear);
	gui.addButton("Clear before", &this->bangClearBefore);
	gui.addButton("Clear after", &this->bangClearAfter);
	gui.addButton("Save pipets", &this->bangSavePipets);
	gui.addToggle("Progress", &this->toggleProgress);
	gui.addToggle("Save", &this->toggleSave);
	gui.addSpacer();

	gui.addLabel("Frame details");
	this->guiFrameTimestamp = gui.addLabel("Timestamp", "", OFX_UI_FONT_SMALL);
	this->guiFrameDuration = gui.addLabel("Duration", "", OFX_UI_FONT_SMALL);
}
  void findMinMax(const cv::Mat &ir, const std::vector<cv::Point2f> &pointsIr)
  {
    minIr = 0xFFFF;
    maxIr = 0;
    for(size_t i = 0; i < pointsIr.size(); ++i)
    {
      const cv::Point2f &p = pointsIr[i];
      cv::Rect roi(std::max(0, (int)p.x - 2), std::max(0, (int)p.y - 2), 9, 9);
      roi.width = std::min(roi.width, ir.cols - roi.x);
      roi.height = std::min(roi.height, ir.rows - roi.y);

      findMinMax(ir(roi));
    }
  }
Example #9
std::vector<cv::MatND> computeProbImage(cv::Mat image, std::vector<cv::Rect> rectRoi, std::vector<cv::Mat> &hist, std::vector<bool> &detected)
{
    int smin = 30;
    int vmin = 10;
    int vmax = 256;
    cv::Mat mask;
    cv::Mat hsv;
    cv::Mat hue;
    std::vector<cv::MatND> backProj;
    int channels[] = {0,0};
    int hbins = 30;                                   // Quantize the hue to 30 levels
    //int sbins = 32;                                 // and the saturation to 32 levels
    int histSize = MAX( hbins, 2 );
    //int histSizes[] = {hbins, sbins};
    float hue_range[] = { 0, 180 };                   // hue varies from 0 to 179, see cvtColor
    //float sat_range[] = { 0, 256 };                 // saturation varies from 0 (black-gray-white) to
    const float* range = { hue_range };               // 255 (pure spectrum color)
    //const float* ranges = { hue_range, sat_range };
    //double maxVal=0;

    backProj.resize(rectRoi.size());
    hist.resize(rectRoi.size());

    cv::cvtColor(image, hsv, CV_BGR2HSV);
    hue.create(hsv.size(), hsv.depth());
    cv::mixChannels(&hsv, 1, &hue, 1, channels, 1);
    cv::inRange(hsv, cv::Scalar(0, smin, MIN(vmin,vmax)), cv::Scalar(180, 256, MAX(vmin, vmax)), mask);

    for(size_t i=0;i<rectRoi.size();i++)
    {
        if(!detected[i])
        {
            cv::Mat roi(hue, rectRoi[i]);
            cv::Mat maskroi(mask, rectRoi[i]);

            cv::calcHist(&roi, 1, 0, maskroi, hist[i], 1, &histSize, &range, true, false);
            cv::normalize(hist[i], hist[i], 0, 255, cv::NORM_MINMAX);

            detected[i] = true;

            roi.release();
            maskroi.release();
        }

        cv::calcBackProject(&hue, 1, 0, hist[i], backProj[i], &range);
        backProj[i] &= mask;
    }

    return backProj;
}
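The returned back projections are typically passed to cv::CamShift (or cv::meanShift) to update each track window on the next frame; a hedged follow-up sketch where frame, windows, hist and detected stand in for the caller's state:

std::vector<cv::MatND> prob = computeProbImage(frame, windows, hist, detected);
for (size_t i = 0; i < windows.size(); i++)
{
    cv::RotatedRect box = cv::CamShift(prob[i], windows[i],
                                       cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
    // windows[i] now holds the updated search window; box is the oriented estimate
}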
bool ImageSegmenter::Process()
{
	// Check if we are working on full image or ROI
	if (m_processing_roi.length() > 0) { // ROI
		Mat inputImage = imread(m_document_image_path.c_str(), CV_LOAD_IMAGE_GRAYSCALE); // converts to grayscale if required
		stringstream ss(m_processing_roi);
		
		ss >> m_roi_tlx >> m_roi_tly >> m_roi_brx >> m_roi_bry;
		
		Rect roi(m_roi_tlx, m_roi_tly, m_roi_brx - m_roi_tlx + 1, m_roi_bry - m_roi_tly + 1);
		m_docImage = inputImage(roi);
		//imshow("roi", m_docImage);
		//waitKey();
	}
Example #11
/**
 * @brief makeCanvas Makes composite image from the given images
 * @param vecMat Vector of Images.
 * @param windowHeight The height of the new composite image to be formed.
 * @param nRows Number of rows of images. (Number of columns will be calculated
 *              depending on the value of total number of images).
 * @return new composite image.
 */
cv::Mat makeCanvas(std::vector<cv::Mat>& vecMat, int windowHeight, int nRows) {
    int N = vecMat.size();
    nRows  = nRows > N ? N : nRows;
    int edgeThickness = 10;
    int imagesPerRow = ceil(double(N) / nRows);
    int resizeHeight = floor(2.0 * ((floor(double(windowHeight - edgeThickness) / nRows)) / 2.0)) - edgeThickness;
    int maxRowLength = 0;
    
    std::vector<int> resizeWidth;
    for (int i = 0; i < N;) {
        int thisRowLen = 0;
        for (int k = 0; k < imagesPerRow; k++) {
            double aspectRatio = double(vecMat[i].cols) / vecMat[i].rows;
            int temp = int( ceil(resizeHeight * aspectRatio));
            resizeWidth.push_back(temp);
            thisRowLen += temp;
            if (++i == N) break;
        }
        if ((thisRowLen + edgeThickness * (imagesPerRow + 1)) > maxRowLength) {
            maxRowLength = thisRowLen + edgeThickness * (imagesPerRow + 1);
        }
    }
    int windowWidth = maxRowLength;
    cv::Mat canvasImage(windowHeight, windowWidth, CV_8UC3, cv::Scalar(0, 0, 0));
    
    for (int k = 0, i = 0; i < nRows; i++) {
        int y = i * resizeHeight + (i + 1) * edgeThickness;
        int x_end = edgeThickness;
        for (int j = 0; j < imagesPerRow && k < N; k++, j++) {
            int x = x_end;
            cv::Rect roi(x, y, resizeWidth[k], resizeHeight);
            cv::Size s = canvasImage(roi).size();
            // change the number of channels to three
            cv::Mat target_ROI(s, CV_8UC3);
            if (vecMat[k].channels() != canvasImage.channels()) {
                if (vecMat[k].channels() == 1) {
                    cv::cvtColor(vecMat[k], target_ROI, CV_GRAY2BGR);
                }
            } else {
                // channel counts already match: copy the image into the temporary buffer
                vecMat[k].copyTo(target_ROI);
            }
            cv::resize(target_ROI, target_ROI, s);
            if (target_ROI.type() != canvasImage.type()) {
                target_ROI.convertTo(target_ROI, canvasImage.type());
            }
            target_ROI.copyTo(canvasImage(roi));
            x_end += resizeWidth[k] + edgeThickness;
        }
    }
    return canvasImage;
}
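A short usage sketch for makeCanvas; the file names are placeholders.

std::vector<cv::Mat> images;
images.push_back(cv::imread("a.jpg"));
images.push_back(cv::imread("b.jpg"));
images.push_back(cv::imread("c.jpg"));
cv::Mat canvas = makeCanvas(images, 600 /*windowHeight*/, 2 /*nRows*/);
cv::imshow("canvas", canvas);
cv::waitKey(0);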
Example #12
cv::Mat pointsToMat(std::vector<cv::Point2d>& pts)
{
    // each column of hm is a homogeneous coordinate of a point.
    int c=pts.size(); // amount of points
    cv::Mat hm(3,c,CV_64FC1,cv::Scalar(1.0));

    cv::Mat m(pts);
    m=m.reshape(1,c);
    m=m.t();

    cv::Mat roi(hm, cv::Rect(0,0,c,2));
    m.copyTo(roi);

    return hm;
}
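Since each column of the result is a homogeneous coordinate [x; y; 1], applying a 3x3 transform becomes a single matrix product; a brief sketch with an identity stand-in for the transform:

std::vector<cv::Point2d> pts;
pts.push_back(cv::Point2d(10, 20));
pts.push_back(cv::Point2d(30, 40));
cv::Mat hm = pointsToMat(pts);             // 3 x N, bottom row all ones
cv::Mat H  = cv::Mat::eye(3, 3, CV_64FC1); // stand-in for a homography
cv::Mat mapped = H * hm;                   // still 3 x N; divide rows 0 and 1 by row 2 to dehomogenize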
Example #13
void ColorEdge::detectColorEdge(const cv::Mat_<cv::Vec3b> &image, cv::Mat_<uchar> &edge)
{
    cv::Mat_<double> edge_map(image.size());
    const int filter_half = static_cast<int>(filter_size_ / 2);

    for(int y = filter_half; y < (edge_map.rows - filter_half); ++y)
    {
        for(int x = filter_half; x < (edge_map.cols - filter_half); ++x)
        {
            cv::Mat_<cv::Vec3b> roi(image, cv::Rect(x - filter_half, y - filter_half, filter_size_, filter_size_));
            edge_map(y, x) = calculateMVD(roi);
        }
    }
    
    edge_map.convertTo(edge, edge.type());
}
Example #14
//deconvolve works with images, read in grayscale mode, 
//converted to CV_32F and divided by 255
cv::Mat wiener_deconvolve(cv::Mat img, bool defocus, int d, int ang, int noise, int sz){
	double snr = pow(10, -0.1*noise);
	blur_edge(img, img, 31);
	cv::Mat IMG;
	cv::dft(img, IMG, cv::DFT_COMPLEX_OUTPUT);
	cv::Mat psf;

	if (defocus)
		psf = defocus_kernel(d, sz);
	else
		psf = motion_kernel(ang, d, sz);

	cv::namedWindow("psf", cv::WINDOW_NORMAL);
	cv::imshow("psf", psf);

	cv::divide(psf, cv::sum(psf)[0], psf);
	cv::Mat psf_pad = cv::Mat::zeros(img.rows, img.cols, CV_32FC1);
	int kh = psf.rows;
	int kw = psf.cols;
	cv::Mat roi(psf_pad(cv::Rect(0, 0, kw, kh)));
	psf.copyTo(roi);
	cv::Mat PSF;
	cv::dft(psf_pad, PSF, cv::DFT_COMPLEX_OUTPUT, kh);
	cv::Mat PSF2 = cv::Mat::zeros(PSF.rows, PSF.cols, PSF.type());					//formula solution
	cv::mulSpectrums(PSF, PSF, PSF2, 0, true);
	cv::Mat mat_arr[2];
	cv::split(PSF2, mat_arr);
	mat_arr[0] += snr;
	mat_arr[1] = mat_arr[0];
	cv::merge(mat_arr, 2, PSF2);
	cv::divide(IMG, PSF2, IMG);
	cv::mulSpectrums(IMG, PSF, IMG, 0, true);
	cv::Mat result(img.rows, img.cols, CV_32FC1);
	cv::idft(IMG, result, cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);
	roll_mat(result, kh, kw);
	IMG.release();
	psf.release();
	psf_pad.release();
	PSF.release();
	PSF2.release();
	roi.release();
	mat_arr[0].release();
	mat_arr[1].release();
	return result;
}
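A call sketch matching the precondition comment at the top of the function (grayscale input, converted to CV_32F and divided by 255); the parameter values are only illustrative, and blur_edge, defocus_kernel, motion_kernel and roll_mat must be defined elsewhere:

cv::Mat blurred = cv::imread("blurred.png", CV_LOAD_IMAGE_GRAYSCALE);
cv::Mat img32f;
blurred.convertTo(img32f, CV_32F, 1.0 / 255.0);
cv::Mat restored = wiener_deconvolve(img32f, true /*defocus*/, 22 /*d*/, 0 /*ang*/,
                                     10 /*noise*/, 65 /*sz*/);
cv::imshow("restored", restored);
cv::waitKey(0);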
Example #15
Mat convolutionOperator1D(Mat &signalVector, Mat &kernel, BorderTypes border) {

    Mat filtered;
    bool was1col = false;

    if (!signalVector.empty() && !kernel.empty()) {
        // If we receive a signal vector with one column, transpose it
        if (signalVector.cols == 1) {
            signalVector = signalVector.t();
            was1col = true;
        }
        int extraBorder = kernel.cols / 2;

        vector<Mat> signalVectorByChannels(signalVector.channels());
        split(signalVector, signalVectorByChannels);


        for (vector<Mat>::const_iterator it = signalVectorByChannels.begin(); it != signalVectorByChannels.end(); ++it) {
            Mat m = *(it);
            // Create a new Mat with the extra borders needed
            Mat signalWithBorder;
            // Add extra borders to the vector to solve boundary issue
            copyMakeBorder(m, signalWithBorder, 0, 0, extraBorder, extraBorder, border, Scalar(0));
            // Vector to store the convolution result
            filtered = m.clone();
            // Create a ROI to pass along the vector and compute convolution with the kernel
            Mat roi(signalWithBorder, Rect(0, 0, kernel.cols, 1));
            for (int i = 0; i < m.cols; i++) {
                // Multiply the focused section by the kernel
                Mat r = roi.mul(kernel);
                // Sum the result of the above operation to the pixel at i
                filtered.at<double>(i) = (double) *(sum(r).val);
                // Move the Roi one position to the right
                roi = roi.adjustROI(0, 0, -1, 1);
            }
            filtered.copyTo(m);
        }
        // Merge the vectors into a multichannel Mat
        merge(signalVectorByChannels, filtered);
    }

    filtered = was1col ? filtered.t() : filtered;

    return filtered;
}
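A minimal call sketch (assuming the same using namespace cv as the function above): both the signal and the kernel should be CV_64F row or single-column vectors, since the loop writes results through filtered.at<double>; a 3-tap box kernel is used here for illustration.

Mat signal = (Mat_<double>(1, 7) << 1, 2, 3, 4, 5, 6, 7);
Mat kernel = (Mat_<double>(1, 3) << 1.0/3, 1.0/3, 1.0/3);   // 3-tap box filter
Mat smoothed = convolutionOperator1D(signal, kernel, BORDER_REPLICATE);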
// After calling CreateCollage() and FastAdjust(), call this function to save result
// collage to a image file specified by out_put_image_path.
cv::Mat CollageBasic::OutputCollageImage() const {
  // Traverse tree_leaves_ vector. Resize tile image and paste it on the canvas.
  assert(canvas_alpha_ != -1);
  assert(canvas_width_ != -1);
  cv::Mat canvas(cv::Size(canvas_width_, canvas_height_), image_vec_[0].type());
  assert(image_vec_[0].type() == CV_8UC3);
  for (int i = 0; i < image_num_; ++i) {
    int img_ind = tree_leaves_[i]->image_index_;
    FloatRect pos = tree_leaves_[i]->position_;
    cv::Rect pos_cv(pos.x_, pos.y_, pos.width_, pos.height_);
    cv::Mat roi(canvas, pos_cv);
    assert(image_vec_[0].type() == image_vec_[img_ind].type());
    cv::Mat resized_img(pos_cv.height, pos_cv.width, image_vec_[img_ind].type());
    cv::resize(image_vec_[img_ind], resized_img, resized_img.size());
    resized_img.copyTo(roi);
  }
  return canvas;
}
Example #17
void imageCB(const sensor_msgs::Image::ConstPtr img) {
  cv_bridge::CvImagePtr cv_ptr;
  try {
    cv_ptr = cv_bridge::toCvCopy(img);
  } catch (cv_bridge::Exception& ex1) {
    ROS_ERROR("cv_bridge exception: %s", ex1.what());
    return;
  }


  // set the image region of interest
  cv::Mat in_img1  = cv_ptr->image.clone();
  cv::Rect roi(60,50,400,350);
  cv::Mat in_img(in_img1, roi);
 

  // do a circular hough transform
  cv::Mat img1,gray;
  in_img.copyTo(img1);
  int thresh1 = 100;
  cv::Canny(img1, gray, thresh1, thresh1*2, 3 );

  // smooth it, otherwise a lot of false circles may be detected
  cv::GaussianBlur( gray, gray, cv::Size(9, 9), 2, 2 );
  cv::imshow( "blur", gray );
  
  std::vector<cv::Vec3f> circles;
  cv::HoughCircles(gray, circles, CV_HOUGH_GRADIENT,
		   2, gray.rows/3, 200, 100,50,150 );

  for( size_t i = 0; i < circles.size(); i++ ) {
    cv::Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
    int radius = cvRound(circles[i][2]);

    // draw the circle center
    cv::circle( img1, center, 3, cv::Scalar(0,255,0), -1, 8, 0 );

    // draw the circle outline
    cv::circle( img1, center, radius, cv::Scalar(0,0,255), 7, 8, 0 );
  }
  
  cv::imshow( "circles", img1 );
  cv::waitKey(10);
}
void TrackShirt::SelectTargetShirtCallback(const PerFoRoControl::SelectTarget msg)
{
	Mat clickFrame = frame;
	Mat roiHSV;
	selection.x = msg.x;
	selection.y = msg.y;
	selection.width = msg.width;
	selection.height = msg.height;

	if( selection.width > 0 && selection.height > 0 )
		trackObject = 1;

	selectCenter.x = (int)(selection.x + selection.width/2);
	selectCenter.y = (int)(selection.y + selection.height/2);

	selectCentroid = selectCenter;

	//defines roi
	cv::Rect roi( selection.x, selection.y, selection.width, selection.height );

	//copies input image in roi
	cv::Mat image_roi = clickFrame(roi);

	cvtColor(image_roi, roiHSV, CV_BGR2HSV);

	//computes mean over roi
	cv::Scalar hsvColor = cv::mean( roiHSV );
	cout<<"hsv"<<hsvColor<<endl;

	double minH = (hsvColor.val[0] >= mColorRadius.val[0]) ? hsvColor.val[0]-mColorRadius.val[0] : 0;
	double maxH = (hsvColor.val[0]+mColorRadius.val[0] <= 179) ? hsvColor.val[0]+mColorRadius.val[0] : 179;

	mLowerBound.val[0] = minH;
	mUpperBound.val[0] = maxH;

	mLowerBound.val[1] = hsvColor.val[1] - mColorRadius.val[1];
	mUpperBound.val[1] = hsvColor.val[1] + mColorRadius.val[1];

	mLowerBound.val[2] = hsvColor.val[2] - mColorRadius.val[2];
	mUpperBound.val[2] = hsvColor.val[2] + mColorRadius.val[2];

	mLowerBound.val[3] = 0;
	mUpperBound.val[3] = 255;
}
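In the tracking loop, the bounds computed above would typically be consumed with inRange on each new HSV frame to obtain a binary mask of the selected color; a hedged sketch, assuming mLowerBound and mUpperBound are cv::Scalar as their four .val components suggest:

Mat hsvFrame, shirtMask;
cvtColor(frame, hsvFrame, CV_BGR2HSV);
inRange(hsvFrame, mLowerBound, mUpperBound, shirtMask);   // 255 where a pixel falls inside the tracked color range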
Example #19
TEST(ImageCacheEntryProcessing, RepeatEdgesAbove) {
    /*
     0000
     0000
     1234
     4567
     */
    RectI roundedBounds(0, 0, 4, 4);
    RectI roi(0,0,4,2);
    std::vector<char> buf(roundedBounds.area(), 0);

    *getBufAt(0, 0) = 4; *getBufAt(1, 0) = 5; *getBufAt(2, 0) = 6; *getBufAt(3, 0) = 7;
    *getBufAt(0, 1) = 1; *getBufAt(1, 1) = 2; *getBufAt(2, 1) = 3; *getBufAt(3, 1) = 4;

    ImageCacheEntryProcessing::repeatEdgesForDepth<char>(&buf[0], roi, roundedBounds.width(), roundedBounds.height());

    ASSERT_TRUE(*getBufAt(0,2) == 1); ASSERT_TRUE(*getBufAt(1,2) == 2); ASSERT_TRUE(*getBufAt(2,2) == 3); ASSERT_TRUE(*getBufAt(3,2) == 4);
    ASSERT_TRUE(*getBufAt(0,3) == 1); ASSERT_TRUE(*getBufAt(1,3) == 2); ASSERT_TRUE(*getBufAt(2,3) == 3); ASSERT_TRUE(*getBufAt(3,3) == 4);
}
Example #20
cv::Mat DestinNetworkAlt::getLayerCentroidImages(int layer,
                              int scale_width,
                              int border_width){
    if(!isUniform){
        throw std::logic_error("can't displayLayerCentroidImages with non uniform DeSTIN.");
    }

    int centroids = getBeliefsPerNode(layer);
    int images_wide = ceil(sqrt(centroids));
    int sub_img_width = (int)((double)scale_width / (double)images_wide - (double)border_width);

    // sub-image width plus border; each image gets a right and bottom border only.
    int wpb = sub_img_width + border_width;

    int images_high = ceil((float)centroids / (float)images_wide);

    // initialize the big image as solid black
    cv::Mat big_img = cv::Mat::zeros(wpb*images_high, wpb*images_wide, getCvFloatImageType());

    int r, c, x, y;
    // copies the subimages into the correct place in the big image
    for(int centroid = 0 ; centroid < centroids ; centroid++){
            r = centroid  / images_wide;
            c = centroid - r * images_wide;
            x = c * wpb;
            y = r * wpb;

            cv::Mat subimage = convertCentroidImageToMatImage(layer, centroid, false);
            cv::Mat subimage_resized;
            cv::resize(subimage, subimage_resized, cv::Size(sub_img_width, sub_img_width), 0,0,cv::INTER_NEAREST);
            cv::Rect roi( cv::Point( x, y ), subimage_resized.size() );
            cv::Mat dest = big_img( roi );

            // copy centroid image into big image
            subimage_resized.copyTo( dest );
    }

    //cv::Mat toShow;
    big_img.convertTo(layerCentroidsImage, getCvByteImageType(), 255);

    //layerCentroidsImage = big_img;
    return layerCentroidsImage;
}
    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        cv::cuda::setDevice(devInfo.deviceID());

        cv::Rect roi(0, 0, 48, 96);
        img = readImage(GET_PARAM(1), cv::IMREAD_GRAYSCALE);
        ASSERT_FALSE(img.empty());
        c_img = img(roi);

        cv::Rect roi2(0, 0, 54, 108);
        c_img2 = img(roi2);

        cv::Rect roi3(0, 0, 64, 128);
        c_img3 = img(roi3);

        cv::Rect roi4(0, 0, 32, 64);
        c_img4 = img(roi4);
    }
Example #22
TEST(ImageCacheEntryProcessing, RepeatEdgesGeneralCase) {
    /*
     Make such a rectangle
     00000
     01230
     04560
     07890
     00000

     fill the 0s by their corresponding numbers
     */
    RectI roundedBounds(0, 0, 5, 5);
    RectI roi(1, 1, 4, 4);
    std::vector<char> buf(roundedBounds.area(), 0);


    *getBufAt(1, 3) = 1; *getBufAt(2, 3) = 2; *getBufAt(3, 3) = 3;
    *getBufAt(1, 2) = 4; *getBufAt(2, 2) = 5; *getBufAt(3, 2) = 6;
    *getBufAt(1, 1) = 7; *getBufAt(2, 1) = 8; *getBufAt(3, 1) = 9;

    ImageCacheEntryProcessing::repeatEdgesForDepth<char>(&buf[0], roi, roundedBounds.width(), roundedBounds.height());

    ASSERT_TRUE(*getBufAt(0,0) == 7);
    ASSERT_TRUE(*getBufAt(1,0) == 7);
    ASSERT_TRUE(*getBufAt(2,0) == 8);
    ASSERT_TRUE(*getBufAt(3,0) == 9);
    ASSERT_TRUE(*getBufAt(4,0) == 9);

    ASSERT_TRUE(*getBufAt(0,1) == 7);
    ASSERT_TRUE(*getBufAt(4,1) == 9);

    ASSERT_TRUE(*getBufAt(0,2) == 4);
    ASSERT_TRUE(*getBufAt(4,2) == 6);

    ASSERT_TRUE(*getBufAt(0,3) == 1);
    ASSERT_TRUE(*getBufAt(4,3) == 3);

    ASSERT_TRUE(*getBufAt(0,4) == 1);
    ASSERT_TRUE(*getBufAt(1,4) == 1);
    ASSERT_TRUE(*getBufAt(2,4) == 2);
    ASSERT_TRUE(*getBufAt(3,4) == 3);
    ASSERT_TRUE(*getBufAt(4,4) == 3);
}
Example #23
TEST(ImageCacheEntryProcessing, RepeatEdgesBottom) {
    /*
     1234
     5678
     0000
     0000
     */
    RectI roundedBounds(0, 0, 4, 4);
    RectI roi(0,2,4,4);
    std::vector<char> buf(roundedBounds.area(), 0);

    *getBufAt(0, 2) = 5; *getBufAt(1, 2) = 6; *getBufAt(2, 2) = 7; *getBufAt(3, 2) = 8;
    *getBufAt(0, 3) = 1; *getBufAt(1, 3) = 2; *getBufAt(2, 3) = 3; *getBufAt(3, 3) = 4;

    ImageCacheEntryProcessing::repeatEdgesForDepth<char>(&buf[0], roi, roundedBounds.width(), roundedBounds.height());

    ASSERT_TRUE(*getBufAt(0,0) == 5); ASSERT_TRUE(*getBufAt(1,0) == 6); ASSERT_TRUE(*getBufAt(2,0) == 7); ASSERT_TRUE(*getBufAt(3,0) == 8);
    ASSERT_TRUE(*getBufAt(0,1) == 5); ASSERT_TRUE(*getBufAt(1,1) == 6); ASSERT_TRUE(*getBufAt(2,1) == 7); ASSERT_TRUE(*getBufAt(3,1) == 8);
}
void GrabCut::run(Mat img, Mat &msk)
{
    cout << "run grabcut" << endl;
    _src	= img;
    _cutResultMask = Mat(img.size(), CV_8UC1, Scalar(0));
    _maskStore = Mat(img.size(), CV_8UC1, Scalar(0));
    _mask	= Mat::ones(_src.size(),CV_8UC1)*GC_PR_BGD;
    _bin	= Mat::zeros(_src.size(),CV_8UC1);
    cout << "GC_BGD " << GC_BGD <<endl;				// 0
    cout << "GC_FGD " << GC_FGD <<endl;				// 1
    cout << "GC_PR_BGD " << GC_PR_BGD <<endl;		// 2
    cout << "GC_PR_FGD " << GC_PR_FGD <<endl;		// 3
    _name = "graphcut";
    namedWindow(_name);
    setMouseCallback(_name, wevents,this);
    Rect roi(0,0,_src.cols,_src.rows);
    _dsp = Mat::zeros(_src.rows*2,_src.cols*2,CV_8UC3);
    _src.copyTo(_dsp(roi));
    //_dsp(roi) = _src.clone();
    cout << "loop" << endl;
    while(1)
    {
        imshow(_name,_dsp);
        char c = waitKey(1);				// 
        
        if(c=='d')							// done
        {			
            msk = _bin*1.0;					// output
            break;
        }
        else if(c=='f') _mode = GC_FGD;		// foreground mode
        else if(c=='b') _mode = GC_BGD;		// background mode
        else if(c=='r')						// reset
        {
            _src.copyTo(_dsp(roi));			// 
            _mask	= GC_PR_BGD;
            _gcut	= GC_PR_BGD;
            show();
        }
    }
    destroyWindow(_name);
}
	void imageCb(const sensor_msgs::ImageConstPtr& msg)
	{
		//get image cv::Pointer
		cv_bridge::CvImagePtr cv_ptr;// = cv_bridge::toCvShare(msg);

		cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);

		src = cv_ptr->image;

		cv::Rect roi(0,0, src.size().width, (2.0/3.0)*src.size().height);

		cv::Mat cropped_image = src(roi);

		cv::waitKey(3);
		// Output modified video stream
		cv_bridge::CvImage out_msg;
		out_msg.header   = cv_ptr->header; // Same timestamp and tf frame as input image
		out_msg.encoding = cv_ptr->encoding; // Or whatever
		out_msg.image    = cropped_image;
		image_pub_.publish(out_msg.toImageMsg());
	}
Example #26
static cv::Mat makeCanvas(std::vector<cv::Mat>& vecMat, int windowHeight, int nRows) {
   int N = vecMat.size();
   nRows  = nRows > N ? N : nRows;
   int edgeThickness = 10;
   int imagesPerRow = ceil(double(N) / nRows);
   int resizeHeight = floor(2.0 * ((floor(double(windowHeight - edgeThickness) / nRows)) / 2.0)) - edgeThickness;
   int maxRowLength = 0;

   std::vector<int> resizeWidth;
   for (int i = 0; i < N;) {
       int thisRowLen = 0;
       for (int k = 0; k < imagesPerRow; k++) {
           double aspectRatio = double(vecMat.at(i).cols) / vecMat.at(i).rows;
           int temp = int( ceil(resizeHeight * aspectRatio));
           resizeWidth.push_back(temp);
           thisRowLen += temp;
           if (++i == N) break;
       }
       if ((thisRowLen + edgeThickness * (imagesPerRow + 1)) > maxRowLength) {
           maxRowLength = thisRowLen + edgeThickness * (imagesPerRow + 1);
       }
   }

   int windowWidth = maxRowLength;
   cv::Mat canvasImage(windowHeight, windowWidth, CV_8UC3, Scalar(0, 0, 0));

   for (int k = 0, i = 0; i < nRows; i++) {
       int y = i * resizeHeight + (i + 1) * edgeThickness;
       int x_end = edgeThickness;
       for (int j = 0; j < imagesPerRow && k < N; k++, j++) {
           int x = x_end;
           Rect roi(x, y, resizeWidth.at(k), resizeHeight);
           cv::Mat target_ROI = canvasImage(roi);
           resize(vecMat.at(k), target_ROI, target_ROI.size());
           x_end += resizeWidth.at(k) + edgeThickness;
       }
   }

   return canvasImage;
}
FindResult PyramidTemplateMatcher::nextFromLowerPyramid(){
   FindResult match = lowerPyramid->next();

   int x = match.x*factor;
   int y = match.y*factor;

   
   // compute the parameter to define the neighborhood rectangle
   int x0 = max(x-(int)factor,0);
   int y0 = max(y-(int)factor,0);
   int x1 = min(x+data.target.cols+(int)factor,data.source.cols);
   int y1 = min(y+data.target.rows+(int)factor,data.source.rows);
   Rect roi(x0,y0,x1-x0,y1-y0);

   Point detectionLoc;
   double detectionScore = findBest(data, &roi, result, detectionLoc);

   detectionLoc.x += roi.x;
   detectionLoc.y += roi.y;

   return FindResult(detectionLoc.x,detectionLoc.y,data.target.cols,data.target.rows,detectionScore);
}
void LevelTracker::update(const VisionCore::Frame<cv::Mat>& frame){
	// Create a sub-image covering the region of interest
	cv::Rect roi(topLeft,bottonRight);
	const cv::Mat& originalImg = frame.getImg();
	cv::Mat img = originalImg(roi);

	// Convert to the Lab color space
	cv::Mat imgLab;
	cv::cvtColor(img,imgLab,CV_BGR2Lab);

	// For each row, compute the cumulative distance to each reference color
	std::vector<double> distPos(img.rows);  // cumulative distance to the positive reference per row
	std::vector<double> distNeg(img.rows);
	double distPosCum=0;
	double distNegCum=0;
	for(int i=0;i<imgLab.rows;i++){
		cv::Mat row=imgLab.row(i);
		cv::Scalar meanColor = cv::mean(row);
		distPos[i]=distPosCum+cv::norm(meanColor,positiveColorLab,cv::NORM_L2);
		distNeg[i]=distNegCum+cv::norm(meanColor,negativeColorLab,cv::NORM_L2);
		distPosCum=distPos[i];
		distNegCum=distNeg[i];
	}

	// Decide the level: compute the decision error for each row.
	double minError=std::numeric_limits<double>::max();
	int minErrorIndex=0;
	for(int i=0;i<imgLab.rows;i++){
		double error=distNeg[i]+distPos[imgLab.rows-1]-distPos[i];
		if(error<minError){
			minError=error;
			minErrorIndex=i;
		}
	}
	level=(imgLab.rows-minErrorIndex)/(double)imgLab.rows;

    /// True if something is not configured
    m_lostTrack = !negColOk || !posColOk || !regOk;
}
Example #29
void distortx_node_t::do_calc_inputs_interest( const render::context_t& context)
{
    Imath::V2f amplitude = get_value<Imath::V2f>( param( "amplitude"));

    Imath::Box2i roi( interest());
    
    if( input( 1))
        input_as<image_node_t>( 1)->add_interest( roi);

	image_node_t *in = input_as<image_node_t>( 0);
	
	if( get_value<int>( param( "borders")) == border_black)
	{
	    roi.min.x -= amplitude.x;
	    roi.min.y -= amplitude.y;
	    roi.max.x += amplitude.x;
	    roi.max.y += amplitude.y;
	    in->add_interest( roi);
	}
	else
		in->add_interest( in->format());
}
RectScore SVMPlayerDetector::_histBackProject(cv::Mat& patch, cv::MatND& targetHist,cv::Size const& bbSize){
	// stay with RGB space
	/// back projection to the target histogram
	int histSize[] = {50,50,50};
	// each channel ranges from 0 to 256 (exclusive)
	/// red channel range
	float rranges[] = { 0, 256 };
	float granges[] = { 0, 256 };
	float branges[] = { 0, 256 };
	int channels[] = {0,1,2};
	const float* ranges[] = { rranges, granges, branges };
	cv::MatND outputHist;
	cv::calcBackProject(&patch,1,channels,targetHist,outputHist,ranges);
	cv::namedWindow("patch");
	cv::namedWindow("patch_bp");
	cv::imshow("patch_bp",outputHist);
	cv::imshow("patch",patch);
	cv::waitKey(0);
	// scan the patch and get the maximum response
	int patchH = patch.size().height;
	int patchW = patch.size().width;
	float maxSum = 0;
	int maxI = 0, maxJ = 0;
	for(int i = 0; i < patchH - bbSize.height; i++)
		for(int j = 0; j < patchW - bbSize.width; j++){
			/// sum of the back projection over the current bounding box
			cv::Rect roi(j,i,bbSize.width,bbSize.height);
			cv::Scalar tmpSum = cv::sum(outputHist(roi));
			if(tmpSum[0] > maxSum){
				maxSum = tmpSum[0];
				maxI = i;
				maxJ = j;
			}
	}
	RectScore matchScore;
	matchScore.rect = cv::Rect(maxJ,maxI,bbSize.width,bbSize.height);
	matchScore.score = maxSum;
	return matchScore;
}