Пример #1
0
void TrickEstimater::labelContour(unsigned int contour_id, unsigned int rv_size)
{
	// Run connected-component labeling over the sharp-angle flags of one
	// contour (treated as a 1-pixel-high image of width rv_size), write the
	// resulting label id back into is_sharp_angle[contour_id], and record
	// each region's label value in label_index[contour_id].
	LabelingBS	labeling;
	bool at_is_sort_size_order = false; // do not sort result regions by size
	int at_delete_size = 0;             // max size of small regions to discard (0 = keep everything)
	unsigned char *src = new unsigned char[rv_size];
	short *result = new short[rv_size];

	// Use an unsigned index: the original `int i < rv_size` mixed signed and
	// unsigned operands in the comparison (implementation-defined wrap risk
	// for very large rv_size, and a compiler warning in any case).
	for (unsigned int i = 0; i < rv_size; i++){
		src[i] = (unsigned char)is_sharp_angle[contour_id][i];
	}
	// Height 1: the contour is labeled as a single row.
	labeling.Exec(src, result, rv_size, 1, at_is_sort_size_order, at_delete_size);
	label_index_max = labeling.GetNumOfResultRegions();

	// Copy the per-sample label ids back into the member array.
	for (unsigned int i = 0; i < rv_size; i++){
		is_sharp_angle[contour_id][i] = (unsigned short)result[i];
	}
	// Record the label value of each detected region.
	for (int i = 0; i < label_index_max; i++){
		label_index[contour_id][i] = (labeling.GetResultRegionInfo(i))->GetResult();
	}

	delete[] src;
	delete[] result;
}
Пример #2
0
int main(int argc, char* argv[])
{
	// Capture frames from the default camera, mask a hue range, label the
	// mask, and draw a rectangle around the largest detected region.
	// Camera frame width and height.
	int w = 640;
	int h = 480;
	Mat im, hsv, mask;					// image objects
	LabelingBS labeling;				// labeling state
	RegionInfoBS *ri;
	VideoCapture cap(0);				// open the camera
	if (!cap.isOpened()) return -1;		// bail out before allocating anything
	short *mask2 = new short[w * h];	// labeling output buffer (freed below)

	while (1){
		cap >> im;								// grab a frame from the camera
		cvtColor(im, hsv, CV_BGR2HSV);			// convert BGR to HSV
		inRange(hsv, Scalar(150, 70, 70), Scalar(360, 255, 255), mask);	// build mask by color detection
		// Guard against a fully-black mask by painting a tiny white block.
		rectangle(mask, Point(0, 0), Point(1, 1), Scalar(255),-1);
		// Labeling: regions of 30 px or fewer are discarded, results sorted
		// largest-first. NOTE: the 2x2 guard rectangle (4 px) is itself below
		// that threshold, so the result set can still be empty — check the
		// region count before dereferencing region 0 (the original did not,
		// risking a null/invalid RegionInfoBS pointer).
		labeling.Exec((uchar *)mask.data, mask2, w, h, true, 30);
		if (labeling.GetNumOfResultRegions() > 0){
			// Box the largest region.
			ri = labeling.GetResultRegionInfo(0);
			int x1, y1, x2, y2;
			ri->GetMin(x1, y1);
			ri->GetMax(x2, y2);
			rectangle(im, Point(x1, y1), Point(x2, y2), Scalar(255, 0, 0), 3);
		}
		imshow("Camera", im);					// show the camera frame
		imshow("Mask", mask);					// show the mask image
		if (waitKey(30) >= 0){					// exit on any key press
			break;
		}
	}
	delete[] mask2;		// was leaked in the original on every exit path
	return 0;
}
Пример #3
0
void OpenCVAdapter::LabelFrame(){
	// Label the prepared frame buffer and append each region's centroid,
	// scaled from image coordinates (width x height) to window coordinates
	// (winx x winy), to plist.
	LabelingBS	labeling;
	labeling.Exec(label,labeled,xnum,ynum,true,trackbarPosition2);
	int n = labeling.GetNumOfResultRegions();
	printf("%d region found\n",n);
	for(int i=0;i<n;i++){
		RegionInfoBS *ri = labeling.GetResultRegionInfo(i);
		float x,y;
		ri->GetCenterOfGravity(x,y);
		// Scale in floating point first, then truncate. The original wrote
		// `(int)x*winx/width`, where the cast binds to x alone — the centroid
		// was truncated to a whole pixel BEFORE scaling, discarding its
		// sub-pixel precision.
		int px = (int)(x * winx / width);
		int py = (int)(y * winy / height);
		plist.push_back(make_pair(px, py));
	}
}
Пример #4
0
 void BlobDetector::detect(
   const sensor_msgs::Image::ConstPtr& image_msg)
 {
   // Label connected components of the incoming image (regions smaller than
   // min_area_ discarded, results sorted by size) and publish the label map
   // as a 32-bit signed image with the original message header.
   vital_checker_->poke();
   boost::mutex::scoped_lock lock(mutex_);
   // NOTE(review): labeling.Exec reads image.data as 8-bit single-channel;
   // assumes image_msg->encoding is mono8 — confirm against upstream topic.
   cv::Mat image = cv_bridge::toCvShare(image_msg, image_msg->encoding)->image;
   cv::Mat label(image.size(), CV_16SC1);
   LabelingBS labeling;
   labeling.Exec(image.data, (short*)label.data, image.cols, image.rows,
                 true, min_area_);

   // Widen CV_16S labels to CV_32S in a single vectorized call; the original
   // copied element-by-element in a nested loop with identical results.
   cv::Mat label_int;
   label.convertTo(label_int, CV_32SC1);
   pub_.publish(
     cv_bridge::CvImage(image_msg->header,
                        sensor_msgs::image_encodings::TYPE_32SC1,
                        label_int).toImageMsg());
 }
Пример #5
0
void ImgProcQt::detectCard(cv::Mat& bgrImg){
    // Detect a card in the frame by thresholding brightness, labeling the
    // result, and outlining the largest region in green on bgrImg.
    // Convert to HSV to isolate bright areas (the card should be bright).
    cv::Mat hsvImg;
    cv::vector<cv::Mat> splited(3);
    cv::cvtColor(bgrImg, hsvImg, CV_BGR2HSV);
    cv::split(hsvImg, splited);

    // Binarize the V (brightness) channel.
    cv::Mat binImg;
    cv::threshold(splited[2], binImg, 128, 255, CV_THRESH_BINARY);

    // Labeling: discard regions of 10 px or fewer, sort largest-first.
    cv::Mat rImg(binImg.size(), CV_16SC1);
    LabelingBS	labeling;
    labeling.Exec(binImg.data, (short *)rImg.data, binImg.cols, binImg.rows, true, 10);
    // Extract the card area: pixels whose label equals 1 (largest region).
    cv::Mat cardImg;
    cv::compare(rImg, 1, cardImg, CV_CMP_EQ);
    // Retrieve the single external contour of that area.
    cv::vector<cv::vector<cv::Point2i> > contours;
    cv::findContours(cardImg, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_L1);

    // Labeling should guarantee exactly one contour here. Probably.
    cv::drawContours(bgrImg, contours, 0, CV_RGB(0,255,0), 1);
    // NOTE(review): the function body appears truncated in this excerpt —
    // the closing brace is missing; confirm against the full source.
Пример #6
0
// Image callback: finds the two largest yellow regions (assumed fingertip
// markers), builds a square ROI spanning them, sends that crop to a Caffe
// classification service, and stores the result for drawing. Falls back to a
// draw-timeout countdown when fewer/more markers are found.
void DemoInterface::ImgCallBack(const sensor_msgs::ImageConstPtr &msg) {
    if (raw_img_.empty())
        return;

    cv::Rect roi(0, 0, raw_img_.cols, raw_img_.rows);
    cv::Mat marker_img = cv::Mat::zeros(raw_img_.size(), CV_8UC1);

    // Smooth once, then convert to HSV color space.
    cv::Mat hsv_img;
    cv::medianBlur(raw_img_, hsv_img, 13);
    cv::cvtColor(hsv_img, hsv_img, CV_BGR2HSV);

    // Extract the yellow regions into marker_img.
    ExtractColor(hsv_img, marker_img);

    // Labeling (unsorted, no minimum region size).
    cv::Mat label(raw_img_.size(), CV_16SC1);
    LabelingBS labeling;
    labeling.Exec(marker_img.data, (short *) label.data, raw_img_.cols, raw_img_.rows, false, 0);

    cv::Mat out_img(raw_img_.size(), CV_8UC3, cv::Scalar(0, 0, 0));
    // NOTE(review): other call sites use GetNumOfResultRegions(); confirm
    // GetNumOfRegions() is the intended accessor here.
    int num_regions = labeling.GetNumOfRegions();

    // Proceed only when there are 2 or more but fewer than 10 regions.
    if (num_regions > 1 && num_regions < 10) {
        cv::Point point[2];
        cv::Point max_pt[2] = {};
        int max_idx[2] = {0, 1};
        int max_sum[2] = {};
        int num_pt = 0;

        // Find the two regions with the largest pixel area.
        for (int i = 0; i < num_regions; i++) {
            int sum = 0;                   // pixel count of region i
            cv::Point pt(0, 0);            // accumulated coordinates (for centroid)
            cv::Mat_<uchar> labelarea;
            cv::compare(label, i + 1, labelarea, CV_CMP_EQ);

            for (int y = 0; y < labelarea.rows; y++) {
                const uchar *labelarea_row = labelarea.ptr<uchar>(y);
                for (int x = 0; x < labelarea.cols; x++) {
                    if (labelarea_row[x] > 250) {
                        sum++;
                        pt.x += x;
                        pt.y += y;
                    }
                }
            }

            // Insertion into the two-slot top list, shifting smaller entries down.
            for (int j = 0; j < 2; j++) {
                if (sum > max_sum[j]) {
                    if (pt.x == 0 && pt.y == 0)
                        break;             // empty region: ignore
                    else if (sum < SUM_MIN)
                        break;             // too small to count as a marker
                    for (int n = 1; n > j; n--) {
                        max_idx[n] = max_idx[n - 1];
                        max_sum[n] = max_sum[n - 1];
                        max_pt[n] = max_pt[n - 1];
                    }
                    max_idx[j] = i;
                    max_sum[j] = sum;
                    max_pt[j] = pt;
                    num_pt++;
                    break;
                }
            }
        }

        // Need exactly two qualifying markers.
        if (num_pt != 2)
            return;

        // Turn accumulated coordinates into centroids and visualize both regions.
        for (int i = 0; i < 2; i++) {
            int sum = max_sum[i];
            int idx = max_idx[i];
            point[i] = max_pt[i];

            cv::Mat_<uchar> labelarea;
            cv::compare(label, idx + 1, labelarea, CV_CMP_EQ);
            cv::Mat color(raw_img_.size(), CV_8UC3, cv::Scalar(255, 255, 255));

            if (sum > 0) {
                point[i].x /= sum;         // centroid = coordinate sum / pixel count
                point[i].y /= sum;
                color.copyTo(out_img, labelarea);
                cv::circle(out_img, point[i], 20, cv::Scalar(0, 200, 0), 8, 8);
            }
        }

        // Size and midpoint of the region spanned by the two centroids.
        int row_diff = abs(point[0].y - point[1].y);
        int col_diff = abs(point[0].x - point[1].x);
        int row_center = (point[0].y + point[1].y) / 2;
        int col_center = (point[0].x + point[1].x) / 2;
        int length = 0;

        // Use the larger of height/width as the side of a square ROI.
        if (row_diff > col_diff)
            length = row_diff;
        else
            length = col_diff;

        roi.x = col_center - length / 2;
        roi.y = row_center - length / 2;
        roi.width = length;
        roi.height = length;

        // Clamp the ROI to the image boundaries.
        if (roi.x + roi.width > raw_img_.cols)
            roi.width = raw_img_.cols - roi.x;
        if (roi.y + roi.height > raw_img_.rows)
            roi.height = raw_img_.rows - roi.y;
        if (roi.x < 0) {
            roi.width += roi.x;
            roi.x = 0;
        }
        if (roi.y < 0) {
            roi.height += roi.y;
            roi.y = 0;
        }

        // NOTE(review): this guard only skips an all-zero ROI; it does not
        // reject a zero-width-but-offset ROI — confirm that is intended.
        if (roi.x || roi.y || roi.width || roi.height) {
            point_rec_1_ = cv::Point(col_center - length / 2, row_center - length / 2);
            point_rec_2_ = cv::Point(col_center + length / 2, row_center + length / 2);

            // Put the ROI crop of the received image into the ROS service request.
            pinch_classifier::classify srv;
            srv.request.image = *cv_bridge::CvImage(std_msgs::Header(), "bgr8", raw_img_(roi)).toImageMsg();

            // Call the Caffe classification server.
            CHECK(client_.call(srv)) << "Failed to call service";

            // Format the returned label for display.
            result_ = ConvertResponse(srv.response.label);
            point_ = cv::Point(roi.x, roi.y - 20);
            draw_ = true;
            delay_ = DELAY;
        }
    }
    else {
        // No valid marker pair: count down before hiding the overlay.
        if (--delay_ == 0) {
            draw_ = false;
            delay_ = DELAY;
        }
    }

    cv::rectangle(out_img, point_rec_1_, point_rec_2_, cv::Scalar(0, 200, 0), 3, 4);
    cv::imshow("Yellow", out_img);
    cv::imshow("Marker", marker_img);
}