int main(int argc, char **argv) { if (argc != 3) { fprintf(stdout, "usage: %s inputImageName threshNum\n", argv[0]); return -1; } string name(argv[1]); int thresh = atoi(argv[2]); Mat3b src = cvLoadImage(argv[1]); show_mat(src); Mat3b frame = src.clone(); floodFill(frame, Point(1,1),Scalar(0,0,0), NULL, Scalar::all(thresh), Scalar::all(thresh)); show_mat(frame); Mat1b gray; vGrayScale(frame, gray); // Create mat with alpha channel cv::Mat4b dst(src.size()); for (int j = 0; j < dst.rows; ++j) { for (int i = 0; i < dst.cols; ++i) { cv::Vec4b& rgba = dst(j, i); cv::Vec3b& rgb = src(j, i); rgba[0] = rgb[0]; rgba[1] = rgb[1]; rgba[2] = rgb[2]; if (gray(j,i) > 0) rgba[3] = 255; else rgba[3] = 0; } } try { std::vector<int> params; params.push_back(CV_IMWRITE_PNG_COMPRESSION); params.push_back(9); cvSaveImage(string(name+".png").c_str(), &(IplImage)dst, ¶ms[0]); } catch (std::runtime_error& ex) { fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what()); return 1; } fprintf(stdout, "Saved PNG file with alpha data.\n"); waitKey(); }
void ELAS::viz_lane_measurement_generation(const Mat3b & _colorFrame, HoughLine &esq, HoughLine &dir) { Mat3b viz_image = _colorFrame.clone(); // visualizar as houghs finais Scalar preto = Scalar(0, 0, 0); if (!esq.isEmpty()) esq.draw(viz_image, esqCor); if (!dir.isEmpty()) dir.draw(viz_image, dirCor); if (!esq.isEmpty()) esq.draw(viz_image, preto, 1); if (!dir.isEmpty()) dir.draw(viz_image, preto, 1); imshow("lane_measurement_generation", viz_image); waitKey(); }
// Renders the current hough estimate in both perspective space and the IPM
// (bird's-eye) ROI, pastes the IPM view onto the top-left corner of the
// perspective image, and shows the composite in a "KalmanHoughs" window.
// `houghDoMeio` may be NULL, in which case only the raw frames are shown.
void KalmanHoughs::view(HoughDoMeio *houghDoMeio, const Mat &colorFramePerspectiva, const Mat3b &colorFrameRoiIPM, const Scalar &cor) {
    // perspective view
    Mat imgPerspectiva = colorFramePerspectiva.clone();
    if (houghDoMeio != NULL) houghDoMeio->draw(imgPerspectiva, cor, config);

    // IPM view; `ipm` is the full ROI, anchored at the top-left of the
    // perspective image for the overlay below.
    Mat3b imgIPM = colorFrameRoiIPM.clone();
    Rect ipm = Rect(0, 0, colorFrameRoiIPM.cols, colorFrameRoiIPM.rows);
    if (houghDoMeio != NULL) {
        // Removed dead code: the original built an unused HoughLine here via
        // HoughLine::create(*houghDoMeio, config).
        houghDoMeio->draw(imgIPM, cor);
    }

    imgIPM.copyTo(imgPerspectiva(ipm));
    imshow("KalmanHoughs", imgPerspectiva);
}
// Segments skin-colored regions of `captureframe`.
//
// Pipeline: resize to 640x480 (all thresholds are tuned for that size) →
// HSV conversion + Gaussian blur (GPU or CPU path) → inRange threshold with
// `adaptiveHSV` bounds → adaptive threshold + morphological close →
// cannySegmentation to build the final mask → mask applied to the input and
// the result resized back to the original size.
//
// Outputs: *skinDetectHSV receives the HSV-thresholded frame; *skinMask
// receives the binary skin mask. `singleRegionChoice` non-zero keeps only
// the single largest region; otherwise blobs smaller than `minPixelSize`
// are dropped. Returns the masked skin image at the original frame size.
Mat visionUtils::skinDetect(Mat captureframe, Mat3b *skinDetectHSV, Mat *skinMask, std::vector<int> adaptiveHSV, int minPixelSize, int imgBlurPixels, int imgMorphPixels, int singleRegionChoice, bool displayFaces) {
    // Fall back to the default HSV bounds when the caller passed an invalid
    // set. (size() != 6 already covers the empty case, so the original
    // redundant `|| adaptiveHSV.empty()` test is dropped.)
    if (adaptiveHSV.size() != 6) {
        adaptiveHSV.clear();
        adaptiveHSV.push_back(5);
        adaptiveHSV.push_back(38);
        adaptiveHSV.push_back(51);
        adaptiveHSV.push_back(17);
        adaptiveHSV.push_back(250);
        adaptiveHSV.push_back(242);
    }

    Mat3b frameTemp;
    Mat3b frame;

    // Forcing resize to 640x480 -> all thresholds / pixel filters configured for this size.....
    // Note returned to original size at end...
    Size s = captureframe.size();
    cv::resize(captureframe, captureframe, Size(640, 480));

    // BGR -> HSV plus blur, on the GPU when available.
    if (useGPU) {
        GpuMat imgGPU, imgGPUHSV;
        imgGPU.upload(captureframe);
        cv::cvtColor(imgGPU, imgGPUHSV, CV_BGR2HSV);
        GaussianBlur(imgGPUHSV, imgGPUHSV, Size(imgBlurPixels, imgBlurPixels), 1, 1);
        imgGPUHSV.download(frameTemp);
    } else {
        cv::cvtColor(captureframe, frameTemp, CV_BGR2HSV);
        GaussianBlur(frameTemp, frameTemp, Size(imgBlurPixels, imgBlurPixels), 1, 1);
    }

    // Threshold the HSV frame against the [hsvMin, hsvMax] bounds.
    Mat frameThreshold = Mat::zeros(frameTemp.rows, frameTemp.cols, CV_8UC1);
    Mat hsvMin = (Mat_<int>(1, 3) << adaptiveHSV[0], adaptiveHSV[1], adaptiveHSV[2]);
    Mat hsvMax = (Mat_<int>(1, 3) << adaptiveHSV[3], adaptiveHSV[4], adaptiveHSV[5]);
    inRange(frameTemp, hsvMin, hsvMax, frameThreshold);
    frameTemp.copyTo(frame, frameThreshold);

    /* BGR CONVERSION AND THRESHOLD */
    Mat1b frame_gray;

    // send HSV to skinDetectHSV for return
    *skinDetectHSV = frame.clone();

    cv::cvtColor(frame, frame_gray, CV_BGR2GRAY);

    // Adaptive thresholding technique
    // 1. Threshold data to find main areas of skin
    adaptiveThreshold(frame_gray, frame_gray, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY_INV, 9, 1);

    if (useGPU) {
        GpuMat imgGPU;
        imgGPU.upload(frame_gray);
        // 2. Fill in thresholded areas
#if CV_MAJOR_VERSION == 2
        gpu::morphologyEx(imgGPU, imgGPU, CV_MOP_CLOSE, Mat1b(imgMorphPixels, imgMorphPixels, 1), Point(-1, -1), 2);
        gpu::GaussianBlur(imgGPU, imgGPU, Size(imgBlurPixels, imgBlurPixels), 1, 1);
#elif CV_MAJOR_VERSION == 3
        //TODO: Check if that's correct
        Mat element = getStructuringElement(MORPH_RECT, Size(imgMorphPixels, imgMorphPixels), Point(-1, -1));
        Ptr<cuda::Filter> closeFilter = cuda::createMorphologyFilter(MORPH_CLOSE, imgGPU.type(), element, Point(-1, -1), 2);
        closeFilter->apply(imgGPU, imgGPU);
        cv::Ptr<cv::cuda::Filter> gaussianFilter = cv::cuda::createGaussianFilter(imgGPU.type(), imgGPU.type(), Size(imgMorphPixels, imgMorphPixels), 1, 1);
        gaussianFilter->apply(imgGPU, imgGPU);
#endif
        imgGPU.download(frame_gray);
    } else {
        // 2. Fill in thresholded areas
        morphologyEx(frame_gray, frame_gray, CV_MOP_CLOSE, Mat1b(imgMorphPixels, imgMorphPixels, 1), Point(-1, -1), 2);
        GaussianBlur(frame_gray, frame_gray, Size(imgBlurPixels, imgBlurPixels), 1, 1);
    }

    // Select single largest region from image, if singleRegionChoice is selected (1)
    if (singleRegionChoice) {
        *skinMask = cannySegmentation(frame_gray, -1, displayFaces);
    } else { // Detect each separate block and remove blobs smaller than a few pixels
        *skinMask = cannySegmentation(frame_gray, minPixelSize, displayFaces);
    }

    // Just return skin
    Mat frame_skin;
    captureframe.copyTo(frame_skin, *skinMask); // Copy captureframe data to frame_skin, using mask from frame_ttt

    // Resize image to original before return
    cv::resize(frame_skin, frame_skin, s);

    if (displayFaces) {
        imshow("Skin HSV (B)", frame);
        imshow("Adaptive_threshold (D1)", frame_gray);
        imshow("Skin segmented", frame_skin);
        // Pump GUI events so the windows render. The original had this call
        // AFTER `return frame_skin;`, where it was unreachable.
        waitKey(1);
    }

    return frame_skin;
}