Example #1
File: lab2.cpp  Project: fuenwang/DIP_LAB
void imageHist(const Mat& input, Mat& histVector){
    Mat input_tmp;
    unsigned char *input_data;
    float *hist_data;
    Size inputSize;
    // To check data is float or uchar
    if(input.type() == CV_32FC(1)){
        //printf("CV_32FC1 Dectect in imageHist!!\n");
        input.convertTo(input_tmp, CV_8UC(1), 255.0);
    }
    else if(input.type() == CV_8UC(1)){
        //printf("CV_8UC1 Dectect in imageHist!!\n");
        input_tmp = input;
    }
    else{
        printf("Error Type in imageHist!!!\n");
        exit(0);
    }
    input_data = input_tmp.data; // get input data
    inputSize = input_tmp.size(); // get input size
    histVector = Mat::zeros(1, 256, CV_32FC(1)); // get a zero mat
    //printf("%d\n", histVector.type()==CV_32FC(1));
    hist_data = (float*)histVector.data;
    // calculate histogram
    for(int i=0; i<inputSize.width * inputSize.height; i++){
        int tmp = (int)input_data[i];
        hist_data[tmp]++;
    }
    return;
}
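A minimal usage sketch for imageHist (the file name and the normalization step are illustrative assumptions, not part of the original project): the function fills a 1x256 CV_32FC1 vector of raw pixel counts, which can then be scaled into a distribution.

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png", 0); // hypothetical file, loaded as grayscale (CV_8UC1)
    cv::Mat hist;
    imageHist(img, hist);                          // hist: 1x256, CV_32FC1, raw pixel counts
    hist = hist / (double)(img.rows * img.cols);   // optional: turn counts into a distribution
    return 0;
}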
Example #2
Mat ColorChannelFilter::applyTo(const Mat& image, Mat& filtered) const {
	if (image.type() != CV_8UC3)
		throw invalid_argument("ColorChannelFilter: the image must be of type CV_8UC3");

	int rows = image.rows;
	int cols = image.cols;
	float normalizer = 1.f / 255.f;
	if (magnitude) { // include magnitude
		filtered.create(rows, cols, CV_8UC(bins + 1));
		if (image.isContinuous() && filtered.isContinuous()) {
			cols *= rows;
			rows = 1;
		}
		for (int row = 0; row < rows; ++row) {
			const Vec3b* colorCode = image.ptr<Vec3b>(row);
			uchar* values = filtered.data + filtered.step[0] * row;
			for (int col = 0; col < cols; ++col) {
				for (unsigned int bin = 0; bin <= bins; ++bin)
					values[bin] = 0;
				uchar hue = colorCode[col][0];
				uchar saturation = colorCode[col][1];
				uchar value = colorCode[col][2];
				float magnitude = normalizer * saturation * value; // magnitude is between 0 and 255
				BinData binData = color2bin[hue];
				values[binData.bin1] += cv::saturate_cast<uchar>(cvRound(binData.weight1 * magnitude));
				values[binData.bin2] += cv::saturate_cast<uchar>(cvRound(binData.weight2 * magnitude));
				values[bins] += binData.weight1 + binData.weight2;
				values += bins + 1;
			}
		}
	} else { // do not include magnitude
		filtered.create(rows, cols, CV_8UC(bins));
		if (image.isContinuous() && filtered.isContinuous()) {
			cols *= rows;
			rows = 1;
		}
		for (int row = 0; row < rows; ++row) {
			const Vec3b* colorCode = image.ptr<Vec3b>(row);
			uchar* values = filtered.data + filtered.step[0] * row;
			for (int col = 0; col < cols; ++col) {
				for (unsigned int bin = 0; bin < bins; ++bin)
					values[bin] = 0;
				uchar hue = colorCode[col][0];
				uchar saturation = colorCode[col][1];
				uchar value = colorCode[col][2];
				float magnitude = normalizer * saturation * value; // magnitude is between 0 and 255
				BinData binData = color2bin[hue];
				values[binData.bin1] += cv::saturate_cast<uchar>(cvRound(binData.weight1 * magnitude));
				values[binData.bin2] += cv::saturate_cast<uchar>(cvRound(binData.weight2 * magnitude));
				values += bins;
			}
		}
	}

	return filtered;
}
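The BinData and color2bin members are defined elsewhere in the source project; a plausible reading of the loop above is soft hue binning, where each hue votes into its two nearest bins with linear-interpolation weights. The following standalone sketch is entirely an assumption about how such a table could be built (it assumes OpenCV's 8-bit hue range [0,180) and a hypothetical BinData layout):

struct BinData { unsigned bin1, bin2; float weight1, weight2; };

// Hypothetical builder for a color2bin-style table: hue h votes into the two
// neighbouring bins, with weights proportional to its distance from each.
static BinData hue2bin(int hue, unsigned bins)
{
    float pos = hue * bins / 180.0f; // continuous bin coordinate in [0, bins)
    BinData d;
    d.bin1 = (unsigned)pos;
    d.bin2 = (d.bin1 + 1) % bins;    // hue is circular, so the last bin wraps to 0
    d.weight2 = pos - (float)d.bin1;
    d.weight1 = 1.0f - d.weight2;
    return d;
}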
Example #3
File: lab2.cpp  Project: fuenwang/DIP_LAB
void histEqualization(const Mat& input, Mat& output, Mat& T){
    Mat input_tmp;
    Mat histVector;
    unsigned char* input_data;
    unsigned char* output_data;
    unsigned char* T_data;
    float* hist_data;
    Size inputSize;
    double time_count;
    // To check input data is float or uchar
    if(input.type() == CV_32FC(1)){
        //printf("CV_32FC1 Dectect in histEqualization!!\n");
        input.convertTo(input_tmp, CV_8UC(1), 255.0);
    }
    else if(input.type() == CV_8UC(1)){
        //printf("CV_8UC1 Dectect in histEqualization!!\n");
        input_tmp = input;
    }
    else{
        printf("Error Type in histEqualization!!!\n");
        exit(0);
    }
    time_count = (double)getTickCount();
    T.create(1,256,CV_8UC(1));
    T_data = (unsigned char*)T.data;
    output.create(input.size(), CV_8UC(1));
    input_data = (unsigned char*)input_tmp.data;
    inputSize = input_tmp.size();
    output_data = (unsigned char*)output.data;
    imageHist(input, histVector); // get the image hist
    hist_data = (float*)histVector.data;

    float accum = 0; // accumulated histogram count
    int height = inputSize.height;
    int width = inputSize.width;
    // To get the transform function
    for(int i=0; i<256; i++){
        T_data[i] = (unsigned char)((255.0/(height*width))* (accum + hist_data[i]) + 0.5);
        accum += hist_data[i];
    }
    // To write the output by T
    for(int i=0; i<inputSize.width*inputSize.height; i++){
        output_data[i] = T_data[(int)input_data[i]];
    }
    time_count = (double)getTickCount() - time_count;
    printf("histEqualization Total consume %gms\n", time_count*1000/getTickFrequency());// get processing time
    return;
}
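Since T is the usual CDF-based mapping rounded to 8 bits, the output should agree with OpenCV's built-in equalizer on CV_8UC1 input. A small cross-check sketch (the comparison itself is an addition, not part of the original lab code):

#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

void checkAgainstOpenCV(const cv::Mat& gray) // expects a CV_8UC1 image
{
    cv::Mat ours, T, ref;
    histEqualization(gray, ours, T); // the function above
    cv::equalizeHist(gray, ref);     // OpenCV's reference implementation
    double maxDiff = cv::norm(ours, ref, cv::NORM_INF);
    printf("max per-pixel difference: %g\n", maxDiff); // off-by-one rounding differences are possible
}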
Example #4
void BackgroundSubtractorLOBSTER_<ParallelUtils::eGLSL>::getBackgroundImage(cv::OutputArray oBGImg) const {
    lvDbgExceptionWatch;
    CV_Assert(m_bInitialized);
    glAssert(m_bGLInitialized && !m_vnBGModelData.empty());
    oBGImg.create(m_oFrameSize,CV_8UC(int(m_nImgChannels)));
    cv::Mat oOutputImg = oBGImg.getMatRef();
    glBindBuffer(GL_SHADER_STORAGE_BUFFER,getSSBOId(BackgroundSubtractorLOBSTER_::eLOBSTERStorageBuffer_BGModelBinding));
    glGetBufferSubData(GL_SHADER_STORAGE_BUFFER,0,m_nBGModelSize*sizeof(uint),(void*)m_vnBGModelData.data());
    glErrorCheck;
    for(size_t nRowIdx=0; nRowIdx<(size_t)m_oFrameSize.height; ++nRowIdx) {
        const size_t nModelRowOffset = nRowIdx*m_nRowStepSize;
        const size_t nImgRowOffset = nRowIdx*oOutputImg.step.p[0];
        for(size_t nColIdx=0; nColIdx<(size_t)m_oFrameSize.width; ++nColIdx) {
            const size_t nModelColOffset = nColIdx*m_nColStepSize+nModelRowOffset;
            const size_t nImgColOffset = nColIdx*oOutputImg.step.p[1]+nImgRowOffset;
            std::array<float,4> afCurrPxSum = {0.0f,0.0f,0.0f,0.0f};
            for(size_t nSampleIdx=0; nSampleIdx<m_nBGSamples; ++nSampleIdx) {
                const size_t nModelPxOffset = nSampleIdx*m_nSampleStepSize+nModelColOffset;
                for(size_t nChannelIdx=0; nChannelIdx<m_nImgChannels; ++nChannelIdx) {
                    const size_t nModelTotOffset = nChannelIdx+nModelPxOffset;
                    afCurrPxSum[nChannelIdx] += m_vnBGModelData[nModelTotOffset];
                }
            }
            for(size_t nChannelIdx=0; nChannelIdx<m_nImgChannels; ++nChannelIdx) {
                const size_t nSampleChannelIdx = ((nChannelIdx==3||m_nImgChannels==1)?nChannelIdx:2-nChannelIdx);
                const size_t nImgTotOffset = nSampleChannelIdx+nImgColOffset;
                oOutputImg.data[nImgTotOffset] = (uchar)(afCurrPxSum[nChannelIdx]/m_nBGSamples);
            }
        }
    }
}
Example #5
// public
cv::Mat RFeatures::loadTGA( const std::string& fname)
{
    cv::Mat m;
    FILE *bstream = fopen( fname.c_str(), "rb");
    if ( !bstream)
    {
        std::cerr << "[ERROR] RFeatures::loadTGA(" << fname << "): Unable to open file for reading!" << std::endl;
        return m;
    }   // end if

    // Read the header
    TGAHeader tga;
    if ( fread( tga.barray, 1, 18, bstream) != 18)
    {
        std::cerr << "[ERROR] RFeatures::loadTGA: Failed to read TGA header!" << std::endl;
        fclose(bstream);    // don't leak the stream on early return
        return m;
    }   // end if
    tga.setFromArray();

    // Read the image bytes row by row (BGR order)
    int bread = 0;
    m = cv::Mat( tga.height, tga.width, CV_8UC(tga.bitsperpixel/8));
    const int nc = m.cols * m.channels();
    for ( int i = int(m.rows-1); i >= 0; --i)   // Read bottom to top
        bread += (int)fread( (void*)m.ptr(i), 1, nc, bstream);

    if ( bread != int(nc * m.rows))
    {
        std::cerr << "[ERROR] RFeatures::loadTGA: Failed to read all " << (nc * m.rows) << " bytes of the image!" << std::endl;
        fclose(bstream);    // don't leak the stream on early return
        return cv::Mat();
    }   // end if

    fclose(bstream);
    return m;
}   // end loadTGA
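Here CV_8UC(tga.bitsperpixel/8) maps 8-, 24-, and 32-bit TGA files to CV_8UC1, CV_8UC3, and CV_8UC4 respectively. A minimal usage sketch (the file path is hypothetical):

#include <iostream>

int main()
{
    cv::Mat img = RFeatures::loadTGA("texture.tga"); // hypothetical path
    if (!img.empty())
        std::cout << "channels: " << img.channels() << std::endl; // 1, 3, or 4
    return 0;
}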
Example #6
int ns__MatToJPG (struct soap *soap, char *InputMatFilename, char **OutputMatFilename)
{
    double start, end;
    start = omp_get_wtime();

    Mat src;

    /* if the depth is not 8U, convert to 8UC(n) */
    int chan = src.channels();
    if( src.depth() != CV_8U )
    {
       src.convertTo(src, CV_8UC(chan));
    }

    /* generate output file name */
	*OutputMatFilename = (char*)soap_malloc(soap, FILENAME_SIZE);
    getOutputFilename(OutputMatFilename,".jpg");

    if(!imwrite(*OutputMatFilename, src))
    {
        cerr<< "MatToJPG:: can not save mat to jpg file" << endl;
        return soap_receiver_fault(soap, "MatToJPG:: can not save mat to jpg file", NULL);
    }

    src.release();

    end = omp_get_wtime();
    cerr<<"ns__MatToJPG "<<"time elapsed "<<end-start<<endl;

    return SOAP_OK;
}
Example #7
bool Camera::Iterate()
{
  AppCastingMOOSApp::Iterate();
  Mat m_capture_frame(HAUTEUR_IMAGE_CAMERA, LARGEUR_IMAGE_CAMERA, CV_8UC(3)); // cv::Mat(rows, cols): rows = height

  if(m_vc_v4l2.read(m_capture_frame))
  {
    if(m_inverser_image)
      flip(m_capture_frame, m_capture_frame, -1);

    Notify((char*)(m_image_name).c_str(), (void*)m_capture_frame.data, 3 * LARGEUR_IMAGE_CAMERA * HAUTEUR_IMAGE_CAMERA, MOOSLocalTime());
    //imwrite("test.jpeg", m_capture_frame);

    if(m_affichage_image)
    {
      imshow(m_display_name, m_capture_frame);
      waitKey(1);
    }
    retractRunWarning("No frame grabbed.");
  }
  else
    reportRunWarning("No frame grabbed.");
    // MOOSDebugWrite("No frame grabbed.");

  AppCastingMOOSApp::PostReport();
  return(true);
}
Example #8
void
mexFunction(int nlhs, mxArray *plhs[],
            int nrhs, const mxArray *prhs[])
{
    // default parameters
    int ksize = 3;
    double sigma = 2.0;

    //validate input
    if (nrhs == 0)
    {
        mexErrMsgTxt("An image is required!");
    }
    if (nlhs != 1)
    {
        mexErrMsgTxt("Only one output is provided.");
    }
    if(!mxIsDouble(prhs[0]) || ((mxGetNumberOfDimensions(prhs[0]) != 3) && (mxGetNumberOfDimensions(prhs[0]) != 2)))
    {
        mexErrMsgTxt("Type of the image has to be double.");
    }
    if((nrhs >= 2)  && ((!mxIsDouble(prhs[1])) || (mxGetScalar(prhs[1]) <= 0)))
    {
        mexErrMsgTxt("ksize has to be a positive integer.");
    } 
    else if (nrhs >= 2)
    {
        ksize = (int) mxGetScalar(prhs[1]);
    }
    if((nrhs >= 3)  && ((!mxIsDouble(prhs[2])) || (mxGetScalar(prhs[2]) <= 0)))
    {
        mexErrMsgTxt("sigma has to be a positive value.");
    } 
    else if (nrhs >= 3)
    {
        sigma = (double) mxGetScalar(prhs[2]);
    }
    
    // determine input/output image properties
    const mwSize *dims = mxGetDimensions(prhs[0]);
    const mwSize nDims = mxGetNumberOfDimensions(prhs[0]);
    const int rows     = (int)dims[0];
    const int cols     = (int)dims[1];
    const int channels = (nDims == 3 ? (int)dims[2] : 1);
    
    // Allocate, copy, and convert the input image
    // @note: input is double
    cv::Mat image = cv::Mat::zeros(cv::Size(cols, rows), CV_64FC(channels));
    om::copyMatrixToOpencv(mxGetPr(prhs[0]), image);
    image.convertTo(image, CV_8U, 255);
    
    // Call OpenCV functions here and do the magic
    cv::Mat out = cv::Mat::zeros(cv::Size(cols, rows), CV_8UC(channels));
    cv::GaussianBlur(image,out,cv::Size(ksize,ksize),sigma);
    
    // Convert opencv to Matlab and set as output
    // @note: output is uint8
    plhs[0] = mxCreateNumericArray(nDims, dims, mxUINT8_CLASS, mxREAL);
    om::copyMatrixToMatlab<unsigned char>(out, (unsigned char*)mxGetPr(plhs[0]));
}
Example #9
Mat apply(Image& img,int type){
	int dim(256);
	Mat lut(1, &dim, CV_8UC(img.dst.channels()));
	switch (type)
	{
	case 0:
	if (img.dst.channels()==1)
	{
		for (int i = 0; i < 256; i++)
			lut.at<uchar>(i) = uchar(255*log2(i+1)/8);
	}
	else
	{
		for (int i = 0; i < 256; i++)
		{
			lut.at<Vec3b>(i)[0] = uchar(255 * log2(i + 1) / 8);
			lut.at<Vec3b>(i)[1] = uchar(255 * log2(i + 1) / 8);
			lut.at<Vec3b>(i)[2] = uchar(255 * log2(i + 1) / 8);
		}
	}
		break;
	case 1:
	if (img.dst.channels()==1)
	{
		for (int i = 0; i < 256; i++)
			lut.at<uchar>(i) = uchar(16 * sqrt(i + 1));
	}
	else
	{
		for (int i = 0; i < 256; i++)
		{
			lut.at<Vec3b>(i)[0] = uchar(16*sqrt(i+1));
			lut.at<Vec3b>(i)[1] = uchar(16*sqrt(i+1));
			lut.at<Vec3b>(i)[2] = uchar(16*sqrt(i+1));
		}
	}
		break;
	case 2:
	if (img.dst.channels()==1)
	{
		for (int i = 0; i < 256; i++)
			lut.at<uchar>(i) = uchar(84*pow(i,0.2));
	}
	else
	{
		for (int i = 0; i < 256; i++)
		{
			lut.at<Vec3b>(i)[0] = uchar(48*pow(i,0.3));
			lut.at<Vec3b>(i)[1] = uchar(48*pow(i,0.3));
			lut.at<Vec3b>(i)[2] = uchar(48*pow(i,0.3));
		}
	}
		break;
	default:
		break;
	}
	LUT(img.original, lut, img.dst);
	return img.dst;
}
Example #10
/**
* Convert gdal type to opencv type
*/
int KGDAL2CV::gdal2opencv(const GDALDataType& gdalType, const int& channels){

	switch (gdalType){

		/// UInt8
	case GDT_Byte:
		if (channels == 1){ return CV_8UC1; }
		if (channels == 3){ return CV_8UC3; }
		if (channels == 4){ return CV_8UC4; }
		else { return CV_8UC(channels); }
		return -1;

		/// UInt16
	case GDT_UInt16:
		if (channels == 1){ return CV_16UC1; }
		if (channels == 3){ return CV_16UC3; }
		if (channels == 4){ return CV_16UC4; }
		else { return CV_16UC(channels); }
		return -1;

		/// Int16
	case GDT_Int16:
		if (channels == 1){ return CV_16SC1; }
		if (channels == 3){ return CV_16SC3; }
		if (channels == 4){ return CV_16SC4; }
		else { return CV_16SC(channels); }
		return -1;

		/// UInt32
	case GDT_UInt32:
	case GDT_Int32:
		if (channels == 1){ return CV_32SC1; }
		if (channels == 3){ return CV_32SC3; }
		if (channels == 4){ return CV_32SC4; }
		else { return CV_32SC(channels); }
		return -1;

	case GDT_Float32:
		if (channels == 1){ return CV_32FC1; }
		if (channels == 3){ return CV_32FC3; }
		if (channels == 4){ return CV_32FC4; }
		else { return CV_32FC(channels); }
		return -1;

	case GDT_Float64:
		if (channels == 1){ return CV_64FC1; }
		if (channels == 3){ return CV_64FC3; }
		if (channels == 4){ return CV_64FC4; }
		else { return CV_64FC(channels); }
		return -1;

	default:
		std::cout << "Unknown GDAL Data Type" << std::endl;
		std::cout << "Type: " << GDALGetDataTypeName(gdalType) << std::endl;
		return -1;
	}

	return -1;
}
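Worth noting: CV_8UC(n) and its siblings expand to CV_MAKETYPE(depth, n), so the fixed-channel branches above return exactly the same constants as the general else-case (and the return -1 after each case is unreachable). The compile-time check below is an added illustration, assuming a C++11 compiler:

#include <opencv2/core/core.hpp>

static_assert(CV_8UC(1) == CV_8UC1 && CV_8UC(3) == CV_8UC3 && CV_8UC(4) == CV_8UC4,
              "CV_8UC(n) matches the fixed-channel macros");
static_assert(CV_16UC(3) == CV_16UC3 && CV_64FC(4) == CV_64FC4,
              "the same holds for the other depths");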
Example #11
CV_IMPL void
cvConvertScaleAbs( const void* srcarr, void* dstarr,
                   double scale, double shift )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src.size == dst.size && dst.type() == CV_8UC(src.channels()));
    cv::convertScaleAbs( src, dst, scale, shift );
}
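The wrapper above delegates to cv::convertScaleAbs, which computes dst(i) = saturate_cast<uchar>(|scale*src(i) + shift|). A quick sanity-check sketch of that formula (the test values are made up):

#include <opencv2/core/core.hpp>
#include <cassert>

int main()
{
    cv::Mat src = (cv::Mat_<float>(1, 3) << -2.f, 0.5f, 300.f);
    cv::Mat dst;
    cv::convertScaleAbs(src, dst, 2.0, 1.0); // dst = saturate_cast<uchar>(|2*src + 1|)
    assert(dst.at<uchar>(0, 0) == 3);   // |-4 + 1| = 3
    assert(dst.at<uchar>(0, 1) == 2);   // | 1 + 1| = 2
    assert(dst.at<uchar>(0, 2) == 255); // 601 saturates to 255
    return 0;
}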
Example #12
void AffineTransformation::transformImage(
                              const cv::Mat& inputImage,
                              cv::Mat** transformedImage,
                              cv::Mat** transformMatrix)
{
  cv::Point2d leftTop;
  leftTop.x = 0;
  leftTop.y = 0;
  cv::Point2d leftTopImage = multiplyMatrixWithVector(leftTop);

  cv::Point2d rightTop;
  rightTop.x = inputImage.cols - 1;
  rightTop.y = 0;
  cv::Point2d rightTopImage = multiplyMatrixWithVector(rightTop);

  cv::Point2d leftBottom;
  leftBottom.x = 0;
  leftBottom.y = inputImage.rows - 1;
  cv::Point2d leftBottomImage = multiplyMatrixWithVector(leftBottom);

  cv::Point2d rightBottom;
  rightBottom.x = inputImage.cols - 1;
  rightBottom.y = inputImage.rows - 1;
  cv::Point2d rightBottomImage = multiplyMatrixWithVector(rightBottom);

  cv::Point2d maxPoint;
  maxPoint.x = std::max(leftTopImage.x,
                        std::max(rightTopImage.x,
                                 std::max(leftBottomImage.x, rightBottomImage.x)));
  maxPoint.y = std::max(leftTopImage.y,
                        std::max(rightTopImage.y,
                                 std::max(leftBottomImage.y, rightBottomImage.y)));

  cv::Point2d minPoint;
  minPoint.x = std::min(leftTopImage.x,
                        std::min(rightTopImage.x,
                                 std::min(leftBottomImage.x, rightBottomImage.x)));
  minPoint.y = std::min(leftTopImage.y,
                        std::min(rightTopImage.y,
                                 std::min(leftBottomImage.y, rightBottomImage.y)));

  cv::Size transformedImageSize;
  transformedImageSize.width = static_cast<int>(std::ceil(maxPoint.x - minPoint.x));
  transformedImageSize.height = static_cast<int>(std::ceil(maxPoint.y - minPoint.y));

  *transformMatrix = new cv::Mat(2, 3, CV_64FC1);
  *transformedImage = new cv::Mat(transformedImageSize, CV_8UC(inputImage.channels()));

  (*transformMatrix)->at<double>(0, 0) = transformationMatrix.at<double>(0, 0);
  (*transformMatrix)->at<double>(0, 1) = transformationMatrix.at<double>(0, 1);
  (*transformMatrix)->at<double>(0, 2) = transformationMatrix.at<double>(0, 2) - minPoint.x;
  (*transformMatrix)->at<double>(1, 0) = transformationMatrix.at<double>(1, 0);
  (*transformMatrix)->at<double>(1, 1) = transformationMatrix.at<double>(1, 1);
  (*transformMatrix)->at<double>(1, 2) = transformationMatrix.at<double>(1, 2) - minPoint.y;

  cv::warpAffine(inputImage, **transformedImage, **transformMatrix, transformedImageSize);
}
Example #13
int colorLMap( const cv::Mat& lmap, cv::Mat& cmap )
{
  if( lmap.depth() != CV_8U )  return -1;
  if( lmap.channels() != 1 )   return -1;

  int W = lmap.size().width;
  int H = lmap.size().height;

  cmap.create( H, W, CV_8UC(3) );
  colorLMap( W,H, (const Tree::Label*)lmap.data, (unsigned char*)cmap.data);
  return 0;
}
Example #14
void PacketReceiver::frame2mat(const AVFrame& frameBGR, cv::Mat& matBGR)
{
	matBGR.create(codecCtx->height, codecCtx->width, CV_8UC(3));
	for (int y = 0; y < codecCtx->height; y++)
	{
		for (int x = 0; x < codecCtx->width; x++)
		{
			matBGR.at<cv::Vec3b>(y, x)[0] = frameBGR.data[0][y * frameBGR.linesize[0] + x * 3 + 0];
			matBGR.at<cv::Vec3b>(y, x)[1] = frameBGR.data[0][y * frameBGR.linesize[0] + x * 3 + 1];
			matBGR.at<cv::Vec3b>(y, x)[2] = frameBGR.data[0][y * frameBGR.linesize[0] + x * 3 + 2];
		}
	}
}
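The per-pixel at<>() copy above is correct but slow; since the AVFrame rows are packed BGR24 with a per-row stride (linesize), each row can be copied with a single memcpy. A hedged alternative sketch (the helper name is made up; it assumes the same packed-BGR24 layout):

#include <opencv2/core/core.hpp>
#include <cstdint>
#include <cstring>

// Hypothetical helper: copy packed BGR24 rows (stride = linesize) into a
// pre-allocated CV_8UC3 Mat, one memcpy per row.
static void copyBGR24(const uint8_t* src, int linesize, cv::Mat& dst)
{
    for (int y = 0; y < dst.rows; ++y)
        std::memcpy(dst.ptr(y), src + (size_t)y * linesize, (size_t)dst.cols * 3);
}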
Example #15
PERF_TEST_P(Size_CvtMode_Bayer, cvtColorBayer8u,
            testing::Combine(
                testing::Values(::perf::szODD, ::perf::szVGA),
                CvtModeBayer::all()
                )
            )
{
    Size sz = get<0>(GetParam());
    int mode = get<1>(GetParam());
    ChPair ch = getConversionInfo(mode);
    mode %= COLOR_COLORCVT_MAX;

    Mat src(sz, CV_8UC(ch.scn));
    Mat dst(sz, CV_8UC(ch.dcn));

    declare.time(100);
    declare.in(src, WARMUP_RNG).out(dst);

    TEST_CYCLE() cvtColor(src, dst, mode, ch.dcn);

    SANITY_CHECK(dst, 1);
}
Example #16
void Proc3DSCropper::find_colors(const cv::Mat &frame, const std::vector<float> &hues, cv::Mat &colors,
                                 const float sigma, const int nouse_s_thresh,
                                 const int nouse_v_thresh) {
    // convert to HSV image
    cv::cvtColor(frame, this->hsv_cache, CV_BGR2HSV);
    cv::medianBlur(this->hsv_cache, this->hsv_cache, 7);
    std::vector<float> color_diffs;
    this->adjust_colors(this->hsv_cache, hues, color_diffs, sigma);
    int nrows = frame.rows;
    int ncols = frame.cols;
    int r, c;
    cv::Mat color_mat = cv::Mat::zeros(nrows, ncols, CV_8UC(COLOR_NUM));
    for (r = 0; r < nrows; ++r) {
        for (c = 0; c < ncols; ++c) {
            cv::Vec3b pix = this->hsv_cache.at<cv::Vec3b>(r, c);
            int pix_hue = pix[0];
            int pix_s = pix[1];
            int pix_v = pix[2];
            int index = 0;
            cv::Vec<uchar, COLOR_NUM> vec;
            bool pix_active = true;
            if (pix_s < nouse_s_thresh || pix_v < nouse_v_thresh) {
                pix_active = false;
            }
            for (const float &hue: hues) {
                if (!pix_active) {
                    vec[index++] = 0; // still advance the bin index for the next hue
                    continue;
                }
                const float diff1 = hue - pix_hue;
                const float diff2 = diff1 + 180;
                const float diff3 = diff1 - 180;
                const float abs1 = std::abs(diff1);
                const float abs2 = std::abs(diff2);
                const float abs3 = std::abs(diff3);
                const float _min_diff = std::min(abs1, abs2);
                const float min_diff = std::min(_min_diff, abs3);
                const float allowable_abs_diff = color_diffs[index];
                if (allowable_abs_diff >= min_diff) {
                    vec[index] = 255;
                }
                else {
                    vec[index] = 0;
                }
                ++index;
            }
            color_mat.at<cv::Vec<uchar, COLOR_NUM>>(r, c) = vec;
        }
    }
    color_mat.copyTo(colors);
};
Example #17
void BackgroundSubtractorLOBSTER_<ParallelUtils::eNonParallel>::initialize(const cv::Mat& oInitImg, const cv::Mat& oROI) {
    lvDbgExceptionWatch;
    // == init
    BackgroundSubtractorLBSP::initialize(oInitImg,oROI);
    m_voBGColorSamples.resize(m_nBGSamples);
    m_voBGDescSamples.resize(m_nBGSamples);
    for(size_t s=0; s<m_nBGSamples; ++s) {
        m_voBGColorSamples[s].create(m_oImgSize,CV_8UC((int)m_nImgChannels));
        m_voBGColorSamples[s] = cv::Scalar_<uchar>::all(0);
        m_voBGDescSamples[s].create(m_oImgSize,CV_16UC((int)m_nImgChannels));
        m_voBGDescSamples[s] = cv::Scalar_<ushort>::all(0);
    }
    m_bInitialized = true;
    refreshModel(1.0f,true);
    m_bModelInitialized = true;
}
Example #18
Mat toImage(const vector<chain_code*>& codes,int row,int col){
    Mat dest(row,col, CV_8UC(1),Scalar(BACKGROUND));
    for (int i=0; i<codes.size(); i++) {
        chain_code* c=codes[i];
        int p1=c->x;
        int p2=c->y;
        dest.at<uchar>(p1,p2)=FOREGROUND;
        // 8-neighbour chain-code steps: direction d moves (row, col) by (dp1[d], dp2[d])
        static const int dp1[8] = { 0, -1, -1, -1,  0,  1, 1, 1 };
        static const int dp2[8] = { 1,  1,  0, -1, -1, -1, 0, 1 };
        for (int j=0; j<c->nodes.size(); j++) {
            int nodeDirect=c->nodes[j];
            p1 += dp1[nodeDirect];
            p2 += dp2[nodeDirect];
            dest.at<uchar>(p1,p2)=FOREGROUND;
        }
        
    }
    return dest;
}
Example #19
File: lab2.cpp  Project: fuenwang/DIP_LAB
void unsharpFiltering(const Mat& input, const Mat& boxMask, float scale, Mat& output, Mat& scaledUnsharp, Mat& blurredInput){
    Mat input_tmp;
    float* input_data;
    float* blur_data;
    float* scaled_data;
    float* output_data;
    Size inputSize;
    double t = (double)getTickCount();
    // To check the input data type is float or uchar
    if(input.type() == CV_8UC(1)){
        input.convertTo(input_tmp, CV_32FC(1), 1/255.0);
    }
    else if(input.type() == CV_32FC(1)){
        input_tmp = input;
    }
    else{
        printf("Error Type in unsharpFiltering!!!\n");
        exit(0);
    }
    // blur the input
    spatialFiltering(input_tmp, boxMask, blurredInput);
    inputSize = input.size();
    output.create(inputSize, CV_32FC(1));
    scaledUnsharp.create(inputSize, CV_32FC(1));
    input_data = (float*)input_tmp.data;
    blur_data = (float*)blurredInput.data;
    output_data = (float*)output.data;
    scaled_data = (float*)scaledUnsharp.data;
    // start doing unsharp
    for(int i=0; i<inputSize.width*inputSize.height; i++){
        float buf;
        scaled_data[i] = scale * (input_data[i] - blur_data[i]);// get scaledUnsharp
        buf = input_data[i] + scaled_data[i];
        if(buf > 1) // pix > 1 (the data is normalized to [0,1])
            buf = 1;
        else if(buf < 0)// pix < 0
            buf = 0;
        output_data[i] = buf; //write output data
    }
    t = ((double)getTickCount() - t)*1000/getTickFrequency();
    printf("unsharpFiltering total consume %gms\n", t);// get the processing time
    return;
}
Example #20
File: lab2.cpp  Project: fuenwang/DIP_LAB
void powerlawTransform(const Mat& input, float r, Mat& output){
    double C;
    double t;
    Mat tmp_input;
    Mat tmp_output;
    unsigned char max_val;
    unsigned char* input_data;
    unsigned char* output_data;
    // Check the Mat data type 
    if(input.type() == CV_32FC1){ // if it is float
        //printf("CV_32FC1 Mat Dectect in powerlawTransform!!\n");
        input.convertTo(tmp_input, CV_8UC1, 255.0);
    }
    else if(input.type() == CV_8UC1){ // if it is uchar
        //printf("CV_8UC1 Mat Dectect in powerlawTransform!!\n");
        tmp_input = input;
    }
    else{ // if it is unknown type
        printf("Type Error in powerlawTransform!!!!\n");
        exit(0);
    }
    Size inputSize = tmp_input.size(); // get input size
    tmp_output.create(inputSize, CV_8UC(1));
    input_data = tmp_input.data;
    output_data = tmp_output.data;

    t = (double)getTickCount();
    max_val = 0;
    // find the max value
    for(int index=0; index<inputSize.height*inputSize.width; index++)
        if(input_data[index] > max_val)
            max_val = input_data[index];
    C = 255.0/pow((double)max_val, (double)r); // calculate the constant C
    // do power operation
    for(int index=0; index<inputSize.height*inputSize.width; index++){
        output_data[index] = (unsigned char)(C * pow((double)input_data[index], (double)r)+0.5);
    }
    t = (double)getTickCount() - t;
    printf("powerlawTransform Total consume %gms\n", t*1000/getTickFrequency());// get processing time
    tmp_output.convertTo(output, CV_32FC1, 1/255.0); //write output data
    return;
}
Example #21
File: lab2.cpp  Project: fuenwang/DIP_LAB
//////////////////////////////////////////////////////////////////////////////////
//  ID: 102061149 Wang Fu-En                                                    //
//  To successfully compile this code you should include the following headers //
//    <cstdlib>                                                                 //
//    <stdlib.h>                                                                //
//    <stdio.h>                                                                 //
//    <iostream>                                                                //
//    <math.h>                                                                  //
//    <opencv2/highgui/highgui.hpp>                                             //
//    <opencv2/core/core.hpp>                                                   //
//  All the functions in this file will compute their processing time           //
//  automatically to gauge the efficiency of the code, and each function will   //
//  check whether the input data is valid; normally, the input should be a      //
//  grayscale image.                                                            //
//////////////////////////////////////////////////////////////////////////////////
void logTransform(const Mat& input, Mat &output){
    double C;
    double t;
    unsigned char max_val;
    Mat tmp_input;
    Mat tmp_output;
    //The following is to check the Mat type
    if(input.type() == CV_32FC1){ // if it is float type
        input.convertTo(tmp_input, CV_8UC1, 255.0);
        //printf("CV_32FC1 Mat Detect in logTransform\n");
    }
    else if(input.type() == CV_8UC1){ // if it is uchar type
        //printf("CV_8UC1 Mat Detect in logTransform!!\n");
        tmp_input = input;
    }
    else{ // if the type is unknown
        //printf("Type Error in logTransform!!!\n");
        exit(0);
    }
    Size size_of_input = tmp_input.size(); //get input size
    unsigned char* data = tmp_input.data;
    t = (double)getTickCount();
    max_val = 0;
    // find the max value to determine C
    for(int index=0; index < size_of_input.height * size_of_input.width; index++)
        if(data[index] > max_val)
            max_val = data[index];

    C = 255.0/(log(1.0+max_val)); // calculate the C value
    tmp_output.create(size_of_input.height, size_of_input.width, CV_8UC(1));
    unsigned char* out_data = tmp_output.data;
    // Do log operation to every point
    for(int index=0; index < size_of_input.height * size_of_input.width; index++){
        out_data[index] = (unsigned char)(C * log(1.0 + data[index]) + 0.5);
    }
    t = (double)getTickCount() - t;
    printf("logTransform Total consume %gms\n", t*1000/getTickFrequency());// get processing time
    tmp_output.convertTo(output, CV_32FC1, 1/255.0); // write output data

    return;
}
Example #22
File: lab2.cpp  Project: fuenwang/DIP_LAB
void laplacianFiltering(const Mat& input, const Mat& laplacianMask, float scale, Mat& output, Mat& scaledLaplacian){
    Mat input_tmp,tmp;
    float* input_data;
    float* output_data;
    float* scaleLap_data;
    double t = (double)getTickCount();
    Size inputSize;
    spatialFiltering(input, laplacianMask, scaledLaplacian); // get scaledLaplacian (not yet multiplied by scale)
    // To check input data is float or uchar
    if(input.type() == CV_8UC(1)){
        input.convertTo(input_tmp, CV_32FC(1), 1/255.0);
    }
    else if(input.type() == CV_32FC(1)){
        input_tmp = input;
    }
    else{
        printf("Error Type in laplacianFiltering!!!\n");
        exit(0);
    }
    inputSize = input.size();
    output.create(inputSize, CV_32FC(1));
    scaleLap_data = (float*)scaledLaplacian.data;
    input_data = (float*)input_tmp.data;
    output_data = (float*)output.data;
    // start doing laplace transform
    for(int p=0; p<inputSize.width*inputSize.height; p++){
        float buf;
        scaleLap_data[p] = scaleLap_data[p]*scale;// multiply by scale
        buf = input_data[p] + scaleLap_data[p]; // add to the original input
        if(buf>1) // if value > 1
            buf = 1;
        else if(buf<0)// if value <0
            buf = 0;
        output_data[p] = buf;
    }
    t = (double)getTickCount()-t;
    printf("Laplacian total consume %gms\n", t*1000/getTickFrequency());// get processing time
    return;
}
Example #23
/* Convert QImage to cv::Mat without data copy
 */
cv::Mat image2Mat_shared(const QImage &img, MatColorOrder *order)
{
    if (img.isNull())
        return cv::Mat();

    switch (img.format()) {
    case QImage::Format_Indexed8:
        break;
#if QT_VERSION >= 0x040400
    case QImage::Format_RGB888:
        if (order)
            *order = MCO_RGB;
        break;
#endif
    case QImage::Format_RGB32:
    case QImage::Format_ARGB32:
    case QImage::Format_ARGB32_Premultiplied:
        if (order)
            *order = getColorOrderOfRGB32Format();
        break;
#if QT_VERSION >= 0x050200
    case QImage::Format_RGBX8888:
    case QImage::Format_RGBA8888:
    case QImage::Format_RGBA8888_Premultiplied:
        if (order)
            *order = MCO_RGBA;
        break;
#endif
#if QT_VERSION >= 0x050500
    case QImage::Format_Alpha8:
    case QImage::Format_Grayscale8:
        break;
#endif
    default:
        return cv::Mat();
    }
    return cv::Mat(img.height(), img.width(), CV_8UC(img.depth()/8), (uchar*)img.bits(), img.bytesPerLine());
}
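One caveat with this zero-copy conversion: the returned Mat aliases the QImage's pixel buffer (note the (uchar*)img.bits() and img.bytesPerLine() arguments), so it is valid only while that QImage is alive; clone it when the Mat must outlive the source. A short usage sketch:

#include <QImage>
#include <opencv2/core/core.hpp>

void example()
{
    QImage qimg(640, 480, QImage::Format_RGB888);
    MatColorOrder order;
    cv::Mat shared = image2Mat_shared(qimg, &order); // no copy: aliases qimg's pixel buffer
    cv::Mat owned  = shared.clone();                 // deep copy, safe after qimg is destroyed
}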
Example #24
Mat Histogram::likeyhoodImage(Mat image)
{

Mat out;
int *c=(int *)calloc(sizeof(int),_channels.size());
for(int i=0;i<_channels.size();i++)
{
    c[i]=_channels[i];

}

int *h=(int *)calloc(sizeof(int),_channels.size());
for(int i=0;i<_channels.size();i++)
{
    h[i]=_histSize[_channels[i]];

}

float **ranges=(float **)calloc(sizeof(float*),_channels.size());
int size=_channels.size();
for(int i=0;i<size;i++)
{
    float *x=(float *)calloc(sizeof(float),2);
    int index=2*_channels[i];
    x[0]=_histRange[index];
    x[1]=_histRange[index+1];
    ranges[i]=x;
    //cerr << x[0] << ":" <<x[1] <<endl;
}
   image.copyTo(out);
   out.convertTo(out,CV_32FC(1),1,0);
   out.setTo(cv::Scalar::all(0));
   Mat i2;
   cv::normalize(_histMat,i2,0,255,NORM_MINMAX);
   cv::calcBackProject(&image,1,c,i2,out,(const float **)ranges,1,true);
   out.convertTo(out,CV_8UC(1),1.0,0);
   // free the calloc'd parameter arrays before returning
   for(int i=0;i<size;i++)
       free(ranges[i]);
   free(ranges);
   free(h);
   free(c);
   return out;
}
Example #25
File: main.cpp  Project: Aharobot/m19404
int main(int argc, char *argv[])
{

    if(argc<2)
    {
        cerr << "Usage : ColorConstancy filename " << endl;
        return -1;
    }

    Mat image=imread(argv[1]);

    cerr << "reading image " << argv[1] << endl;

    color_correction::contrast_stretching a;
    color_correction::gray_world b1;
    color_correction::gray_edge b2;
    color_correction::maxRGB b3;
    color_correction::max_edge b4;

    Mat input;
    input.create(240,320,CV_8UC(3));
    resize(image,input, input.size(), 0, 0, INTER_NEAREST);



    imshow("original",input);
    imshow("contrast stretching",a.run(input));
    imshow("gray world RGB",b1.run2(input,1,2));
    imshow("gray world Lab",b1.run1(input,1));
    imshow("Shades of gray",b1.run2(input,6,2));
    imshow("maxRGB",b3.run(input,6,0));
    imshow("gray edge",b2.run(input,1,0));
    imshow("max edge",b4.run(input,1,0));



    cv::waitKey(0);

    return 0;
}
Example #26
vector<chain_code*> extractChainCodesLevel(const Mat& src,int level){
    vector<chain_code*> chainCodes;
    Mat I;
    threshold(src, I, THRESHOLDVALUE, 255, CV_THRESH_BINARY);
    
    Mat PNH=Mat(3,3, CV_8UC(1),Scalar(BACKGROUND));
    chain_segment* actchain_ptr=NULL;
    bool actchain_isLeft=true;
    node* frame=new node(NULL,NULL,NULL,"frame");
    node* CurrentHole=frame;
    node* CurrentObject=NULL;
    int isMinPoint = 0;
    int currentCoordinateI;
    int currentCoordinateJ;
    vector<coordinates*> savedCoordinates;
    if(level==-1) level=INT_MAX;
    
    for (int i=0; i<I.rows; i++) {
        for (int j=0; j<I.cols; j++) {
            currentCoordinateI=i;
            currentCoordinateJ=j;
            for( int di = 0; di < 3; di++) {
                for(int dj = 0; dj < 3; dj ++ ) {
                    PNH.at<uchar>(di, dj) = getPixel(I, i + di - 1, j + dj - 1);
                }
            }
            if (!isInvalid(PNH)) {
                if(getPixel(PNH, 1, 1)==BACKGROUND) continue;
                if(isMin0(PNH)&&isMax0(PNH)){
                    chain_code* ch=new chain_code(i,j);
                    chainCodes.push_back(ch);
                    continue;
                }
                searchDirections(PNH,actchain_isLeft,actchain_ptr,CurrentHole,CurrentObject,isMinPoint);
                searchMinPoints(PNH,actchain_isLeft,actchain_ptr,CurrentHole,CurrentObject,isMinPoint,currentCoordinateI,currentCoordinateJ,savedCoordinates);
                searchMaxPoints(PNH,actchain_isLeft,actchain_ptr,CurrentHole,CurrentObject,currentCoordinateI,currentCoordinateJ);
            }
        }
        
    }
    int parentSize=1, childSize=0,le=0;
    node * temp;
    queue<node *> q;
    q.push(frame);
    do
    {
        temp = q.front();
        if(temp!=frame&&temp->coord!=NULL){
            coordinates* c = temp->coord;
            chain_code* newChain=new chain_code(c->x,c->y);
            chain_segment* moveChain;
            if(c->typeMin) moveChain=c->whichNode->right;
            else moveChain=c->whichNode->left;
            chain_segment* copyFirst=moveChain;
            while(true){
                vector<int>& vecs=moveChain->chainCodes->nodes;
                if(moveChain->isLeft) reverse(vecs.begin(), vecs.end());
                for (int m=0; m<vecs.size(); m++) {
                    newChain->nodes.push_back(vecs[m]);
                }
                chain_segment* tobeDeleted = moveChain;
                moveChain=moveChain->connectNext;
                delete tobeDeleted->chainCodes;
                delete tobeDeleted;
                if(moveChain==copyFirst) break;
            }
            delete c;
            chainCodes.push_back(newChain);
        }
        q.pop();
        for(int i=0; i<temp->children.size(); i++){
            if(temp->children[i]) q.push(temp->children[i]);
            childSize ++;
        }
        delete temp;
        parentSize--;
        if(parentSize == 0){
            parentSize = childSize;
            childSize = 0;
            le++;
        }
    } while(!q.empty()&&le<level);
    return chainCodes;
}
Example #27
vector<chain_code*> extractChainCodes(const Mat& src){
    vector<chain_code*> chainCodes;
    Mat I;
    threshold(src, I, THRESHOLDVALUE, 255, CV_THRESH_BINARY);

    Mat PNH=Mat(3,3, CV_8UC(1),Scalar(BACKGROUND));
    chain_segment* actchain_ptr=NULL;
    bool actchain_isLeft=true;
    node* frame=new node(NULL,NULL,NULL,"frame");
    node* CurrentHole=frame;
    node* CurrentObject=NULL;
    int isMinPoint=0;
    int currentCoordinateI;
    int currentCoordinateJ;
    vector<coordinates*> savedCoordinates;

    for (int i=0; i<I.rows; i++) {
        for (int j=0; j<I.cols; j++) {
            currentCoordinateI=i;
            currentCoordinateJ=j;
            for( int di = 0; di < 3; di++) {
                for(int dj = 0; dj < 3; dj ++ ) {
                    PNH.at<uchar>(di, dj) = getPixel(I, i + di - 1, j + dj - 1);
                }
            }
            if (!isInvalid(PNH)) {
                if(getPixel(PNH, 1, 1)==BACKGROUND) continue;
                if(isMin0(PNH)&&isMax0(PNH)){
                    chain_code* ch=new chain_code(i,j);
                    chainCodes.push_back(ch);
                    continue;
                }
                searchDirections(PNH,actchain_isLeft,actchain_ptr,CurrentHole,CurrentObject,isMinPoint);
                searchMinPoints(PNH,actchain_isLeft,actchain_ptr,CurrentHole,CurrentObject,isMinPoint,currentCoordinateI,currentCoordinateJ,savedCoordinates);
                searchMaxPoints(PNH,actchain_isLeft,actchain_ptr,CurrentHole,CurrentObject,currentCoordinateI,currentCoordinateJ);
            }
        }

    }
    for(int n=0;n<savedCoordinates.size();n++){
        coordinates* c = savedCoordinates[n];
        chain_code* newChain=new chain_code(c->x,c->y);
        chain_segment* moveChain;
        if(c->typeMin) moveChain=c->whichNode->right;
        else moveChain=c->whichNode->left;
        chain_segment* copyFirst=moveChain;
        while(true){
            vector<int>& vecs=moveChain->chainCodes->nodes;
            if(moveChain->isLeft) reverse(vecs.begin(), vecs.end());
            for (int m=0; m<vecs.size(); m++) {
                newChain->nodes.push_back(vecs[m]);
            }
            chain_segment* tobeDeleted = moveChain;
            moveChain=moveChain->connectNext;
            delete tobeDeleted->chainCodes;
            delete tobeDeleted;
            if(moveChain==copyFirst) break;
            //if(moveChain==NULL) break;
        }
        delete c->whichNode;
        delete c;
        chainCodes.push_back(newChain);
    }
    delete frame;
    return chainCodes;
}
Example #28
void blobfinder::FindWindow(Image* input, Mat output,ColorFilter *m_redFilter, ColorFilter *m_greenFilter, ColorFilter *m_yellowFilter)
{


   //convert from Image* to Mat
    Mat img = input->asIplImage();
  //output = input;
   //White balance image
	
   Mat img_whitebalance = cv::Mat::zeros(img.rows,img.cols,CV_8UC(3));
    img_whitebalance = WhiteBalance(img); 

    imshow("White Balance",img_whitebalance);

	Mat img_hsv;
	Mat img_temp;
	Mat img_red, img_green, img_yellow; //thresholded for these colors

	CvPoint point;
	unsigned int i;
	

		//Color Thresholding
		//Step 1: Convert to HSV space, HSV space is less susceptible to lighting changes
		//HSV stands for Hue, saturation and value. In theory, we can do 'color thresholding' just based on Hue
		cvtColor(img_whitebalance,img_hsv,CV_BGR2HSV);
		
		//Step 2:Threshold with pre-defined levels
		//Hue range from 0 to 180 in OpenCV
		//int red_minH, green_minH, green_maxH, yellow_minH, yellow_maxH;
		//yellow_minH = 0; //10
		//yellow_maxH =60; //40
		//red_minH =60; //15 works well 
		//green_minH = 120;//30
		//green_maxH = 180; //60
		//hsv - from 120-180 seems to be a good red filter
		//these levels should be defined in an XML file and not just loaded here
		int red_minH= m_redFilter->getChannel3Low();
		int red_maxH= m_redFilter->getChannel3High();

		int yellow_minH= m_yellowFilter->getChannel3Low();
		int yellow_maxH= m_yellowFilter->getChannel3High();

		int green_minH= m_greenFilter->getChannel3Low();
		int green_maxH= m_greenFilter->getChannel3High();
	
		vector<Mat> hsv_planes;
		split(img_hsv,hsv_planes);

		//first take any value higher than max and convert it to 0
		//red is a special case because the hue values for red are 0-10 and 170-180
		//same filter as the other cases followed by an invert
		threshold(hsv_planes[0],img_red,red_minH,255,THRESH_TOZERO);
		threshold(img_red,img_red,red_maxH,0,THRESH_TOZERO_INV);
		threshold(img_red,img_red,1,255,THRESH_BINARY_INV);

		threshold(hsv_planes[0],img_yellow,yellow_maxH,0,THRESH_TOZERO_INV);
		threshold(img_yellow,img_yellow,yellow_minH,255,THRESH_TOZERO);

		threshold(hsv_planes[0],img_green,green_maxH,0,THRESH_TOZERO_INV);
		threshold(img_green,img_green,green_minH,255,THRESH_TOZERO);

		threshold(img_red,img_red,1,255,THRESH_BINARY);
		threshold(img_green,img_green,1,255,THRESH_BINARY);
		threshold(img_yellow,img_yellow,1,255,THRESH_BINARY);

		//imshow("Red Single Plane",img_red);
		//imshow("Yellow Plane",img_yellow);
		//imshow("Green Plane",img_green);
		
		hsv_planes[2] = img_red;
		hsv_planes[0] = img_yellow;
		hsv_planes[1] = img_green;
		merge(hsv_planes,img_whitebalance);

		//Step 3: Blob detection on the thresholded image
		//Attempt 2:
		//creation 
		//SimpleBlobDetector* blob_detector;
		//blob_detector = new SimpleBlobDetector();
		//blob_detector->create("SimpleBlobDetector");
		//change params, first move it to public
	

		SimpleBlobDetector::Params params;
		params.minDistBetweenBlobs =0;
		//params.minThreshold =100;
		//params.maxThreshold =256;
		//params.thresholdStep = 200;
		//params.filterByArea=false;
		params.filterByInertia = true;
		params.filterByConvexity = false;
		params.filterByColor=false;
		params.filterByArea = true;
		params.filterByCircularity = true;
		params.minArea =100.0f;
		params.maxArea = 20000.0f;
		params.minCircularity = 0.3;
		params.maxCircularity = 1.5;
		params.filterByColor = false;
		params.minInertiaRatio =.2;
		params.maxInertiaRatio = 1.3;

		//create
		SimpleBlobDetector blob_red(params);
		SimpleBlobDetector blob_yellow(params);
		SimpleBlobDetector blob_green(params);
		blob_red.create("SimpleBlob");
		blob_green.create("SimpleBlob");
		blob_yellow.create("SimpleBlob");
		//detect
		//vector<KeyPoint> keypoints_red;
		//vector<KeyPoint> keypoints_green;
		//vector<KeyPoint> keypoints_yellow;
		blob_red.detect(img_red,_keypoints_red);
		blob_yellow.detect(img_yellow,_keypoints_yellow);
		blob_green.detect(img_green,_keypoints_green);
		//extract x y coordinates of the keypoints
		
		printf("\n Red");
		for (i=0;i<_keypoints_red.size();i++)
		{		
			point.x = _keypoints_red[i].pt.x;
			point.y=  _keypoints_red[i].pt.y;
			printf("\n Size: %f,angle %f, response = %f",_keypoints_red[i].size,_keypoints_red[i].angle, _keypoints_red[i].response);
			circle(img_whitebalance,point,5,Scalar(0,0,150),1,8,0);
			printf("\n Keypoint = %d, (%d, %d)",i,point.x,point.y);
		}	
		printf("\n Yellow");

		for (i=0;i<_keypoints_yellow.size();i++)
		{		
			point.x = _keypoints_yellow[i].pt.x;
			point.y=  _keypoints_yellow[i].pt.y;
			printf("\n Size: %f,angle %f, response = %f",_keypoints_yellow[i].size,_keypoints_yellow[i].angle, _keypoints_yellow[i].response);
			circle(img_whitebalance,point,5,Scalar(0,255,255),1,8,0);
			printf("\n Keypoint = %d, (%d, %d)",i,point.x,point.y);
		}
		printf("\n Green");
		for (i=0;i<_keypoints_green.size();i++)
		{		
			point.x = _keypoints_green[i].pt.x;
			point.y=  _keypoints_green[i].pt.y;
			printf("\n Size: %f,angle %f, response = %f",_keypoints_green[i].size,_keypoints_green[i].angle, _keypoints_green[i].response);
			circle(img_whitebalance,point,5,Scalar(0,150,0),1,8,0);
			printf("\n Keypoint = %d, (%d, %d)",i,point.x,point.y);
		}

		imshow("Found Blobs",img_whitebalance);
		//imshow("img_red",img_red);
		//cvWaitKey(0);
		

	//convert back to Image*
	
	//output->setData(img_whitebalance.data,false);
//output = img_whitebalance;

};
Example #29
void BackgroundSubtractorLOBSTER::initialize(const cv::Mat& oInitImg, const cv::Mat& oROI) {
	CV_Assert(!oInitImg.empty() && oInitImg.cols>0 && oInitImg.rows>0);
	CV_Assert(oInitImg.isContinuous());
	CV_Assert(oInitImg.type()==CV_8UC1 || oInitImg.type()==CV_8UC3);
	if(oInitImg.type()==CV_8UC3) {
		std::vector<cv::Mat> voInitImgChannels;
		cv::split(oInitImg,voInitImgChannels);
		if(!cv::countNonZero((voInitImgChannels[0]!=voInitImgChannels[1])|(voInitImgChannels[2]!=voInitImgChannels[1])))
			std::cout << std::endl << "\tBackgroundSubtractorLOBSTER : Warning, grayscale images should always be passed in CV_8UC1 format for optimal performance." << std::endl;
	}
	cv::Mat oNewBGROI;
	if(oROI.empty() && (m_oROI.empty() || oROI.size()!=oInitImg.size())) {
		oNewBGROI.create(oInitImg.size(),CV_8UC1);
		oNewBGROI = cv::Scalar_<uchar>(UCHAR_MAX);
	}
	else if(oROI.empty())
		oNewBGROI = m_oROI;
	else {
		CV_Assert(oROI.size()==oInitImg.size() && oROI.type()==CV_8UC1);
		CV_Assert(cv::countNonZero((oROI<UCHAR_MAX)&(oROI>0))==0);
		oNewBGROI = oROI.clone();
	}
	LBSP::validateROI(oNewBGROI);
	const size_t nROIPxCount = (size_t)cv::countNonZero(oNewBGROI);
	CV_Assert(nROIPxCount>0);
	m_oROI = oNewBGROI;
	m_oImgSize = oInitImg.size();
	m_nImgType = oInitImg.type();
	m_nImgChannels = oInitImg.channels();
	m_nTotPxCount = m_oImgSize.area();
	m_nTotRelevantPxCount = nROIPxCount;
	m_nFrameIndex = 0;
	m_nFramesSinceLastReset = 0;
	m_nModelResetCooldown = 0;
	m_oLastFGMask.create(m_oImgSize,CV_8UC1);
	m_oLastFGMask = cv::Scalar_<uchar>(0);
	m_oLastColorFrame.create(m_oImgSize,CV_8UC((int)m_nImgChannels));
	m_oLastColorFrame = cv::Scalar_<uchar>::all(0);
	m_oLastDescFrame.create(m_oImgSize,CV_16UC((int)m_nImgChannels));
	m_oLastDescFrame = cv::Scalar_<ushort>::all(0);
	m_voBGColorSamples.resize(m_nBGSamples);
	m_voBGDescSamples.resize(m_nBGSamples);
	for(size_t s=0; s<m_nBGSamples; ++s) {
		m_voBGColorSamples[s].create(m_oImgSize,CV_8UC((int)m_nImgChannels));
		m_voBGColorSamples[s] = cv::Scalar_<uchar>::all(0);
		m_voBGDescSamples[s].create(m_oImgSize,CV_16UC((int)m_nImgChannels));
		m_voBGDescSamples[s] = cv::Scalar_<ushort>::all(0);
	}
	if(m_aPxIdxLUT)
		delete[] m_aPxIdxLUT;
	if(m_aPxInfoLUT)
	    delete[] m_aPxInfoLUT;
	m_aPxIdxLUT = new size_t[m_nTotRelevantPxCount];
	m_aPxInfoLUT = new PxInfoBase[m_nTotPxCount];
	if(m_nImgChannels==1) {
		CV_Assert(m_oLastColorFrame.step.p[0]==(size_t)m_oImgSize.width && m_oLastColorFrame.step.p[1]==1);
		CV_Assert(m_oLastDescFrame.step.p[0]==m_oLastColorFrame.step.p[0]*2 && m_oLastDescFrame.step.p[1]==m_oLastColorFrame.step.p[1]*2);
		for(size_t t=0; t<=UCHAR_MAX; ++t)
			m_anLBSPThreshold_8bitLUT[t] = cv::saturate_cast<uchar>((t*m_fRelLBSPThreshold+m_nLBSPThresholdOffset)/2);
		for(size_t nPxIter=0, nModelIter=0; nPxIter<m_nTotPxCount; ++nPxIter) {
			if(m_oROI.data[nPxIter]) {
				m_aPxIdxLUT[nModelIter] = nPxIter;
				m_aPxInfoLUT[nPxIter].nImgCoord_Y = (int)nPxIter/m_oImgSize.width;
				m_aPxInfoLUT[nPxIter].nImgCoord_X = (int)nPxIter%m_oImgSize.width;
				m_aPxInfoLUT[nPxIter].nModelIdx = nModelIter;
				m_oLastColorFrame.data[nPxIter] = oInitImg.data[nPxIter];
				const size_t nDescIter = nPxIter*2;
				LBSP::computeGrayscaleDescriptor(oInitImg,oInitImg.data[nPxIter],m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,m_anLBSPThreshold_8bitLUT[oInitImg.data[nPxIter]],*((ushort*)(m_oLastDescFrame.data+nDescIter)));
				++nModelIter;
			}
		}
	}
	else { //m_nImgChannels==3
		CV_Assert(m_oLastColorFrame.step.p[0]==(size_t)m_oImgSize.width*3 && m_oLastColorFrame.step.p[1]==3);
		CV_Assert(m_oLastDescFrame.step.p[0]==m_oLastColorFrame.step.p[0]*2 && m_oLastDescFrame.step.p[1]==m_oLastColorFrame.step.p[1]*2);
		for(size_t t=0; t<=UCHAR_MAX; ++t)
			m_anLBSPThreshold_8bitLUT[t] = cv::saturate_cast<uchar>(t*m_fRelLBSPThreshold+m_nLBSPThresholdOffset);
		for(size_t nPxIter=0, nModelIter=0; nPxIter<m_nTotPxCount; ++nPxIter) {
			if(m_oROI.data[nPxIter]) {
				m_aPxIdxLUT[nModelIter] = nPxIter;
				m_aPxInfoLUT[nPxIter].nImgCoord_Y = (int)nPxIter/m_oImgSize.width;
				m_aPxInfoLUT[nPxIter].nImgCoord_X = (int)nPxIter%m_oImgSize.width;
				m_aPxInfoLUT[nPxIter].nModelIdx = nModelIter;
				const size_t nPxRGBIter = nPxIter*3;
				const size_t nDescRGBIter = nPxRGBIter*2;
				for(size_t c=0; c<3; ++c) {
					m_oLastColorFrame.data[nPxRGBIter+c] = oInitImg.data[nPxRGBIter+c];
					LBSP::computeSingleRGBDescriptor(oInitImg,oInitImg.data[nPxRGBIter+c],m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,c,m_anLBSPThreshold_8bitLUT[oInitImg.data[nPxRGBIter+c]],((ushort*)(m_oLastDescFrame.data+nDescRGBIter))[c]);
				}
				++nModelIter;
			}
		}
	}
	m_bInitialized = true;
	refreshModel(1.0f);
}
Example #30
int Train(ToolParam &tool_param, CommonSettings &settings) {

  if (tool_param.train_size() <= settings.param_index) {
    LOG(FATAL)<< "Train parameter index does not exist.";
  }

  TrainParam train_param = tool_param.train(settings.param_index);
  InputParam input_param = train_param.input();

  if(!(input_param.has_patch_size() && input_param.has_padding_size() && input_param.has_labels() && input_param.has_channels())) {
    LOG(FATAL) << "Patch size, padding size, label count or channel count parameter missing.";
  }
  int patch_size = input_param.patch_size();
  int padding_size = input_param.padding_size();
  unsigned int nr_labels = input_param.labels();
  unsigned int nr_channels = input_param.channels();

  std::string proto_solver = "";
  if(!train_param.has_solver()) {
    LOG(FATAL) << "Solver prototxt file argument missing";
  }

  proto_solver = train_param.solver();

  caffe::SolverParameter solver_param;
  caffe::ReadProtoFromTextFileOrDie(proto_solver, &solver_param);

  int test_interval = solver_param.has_test_interval()?solver_param.test_interval():-1;

  shared_ptr<caffe::Solver<float> > solver(
      caffe::GetSolver<float>(solver_param));

  if(train_param.has_solverstate()) {
    // Continue from previous solverstate
    const char* solver_state_c = train_param.solverstate().c_str();
    solver->Restore(solver_state_c);
  }

  // Get handles to the test and train network of the Caffe solver
  boost::shared_ptr<caffe::Net<float>> train_net = solver->net();
  boost::shared_ptr<caffe::Net<float>> test_net;
  if(solver->test_nets().size() > 0) {
    test_net = solver->test_nets()[0];
  }

  // Overwrite label count from the desired count to the pre-consolidation count
  if(input_param.has_preprocessor()) {
    PreprocessorParam preprocessor_param = input_param.preprocessor();
    if(preprocessor_param.has_label_consolidate()) {
      nr_labels = preprocessor_param.label_consolidate().label_size();
    }
  }

  TrainImageProcessor image_processor(patch_size, nr_labels);

  if(input_param.has_preprocessor()) {

    PreprocessorParam preprocessor_param = input_param.preprocessor();

    image_processor.SetBorderParams(input_param.has_padding_size(), padding_size / 2);
    image_processor.SetRotationParams(preprocessor_param.has_rotation() && preprocessor_param.rotation());
    image_processor.SetPatchMirrorParams(preprocessor_param.has_mirror() && preprocessor_param.mirror());
    image_processor.SetNormalizationParams(preprocessor_param.has_normalization() && preprocessor_param.normalization());

    if(preprocessor_param.has_label_consolidate()) {
      LabelConsolidateParam label_consolidate_param = preprocessor_param.label_consolidate();
      std::vector<int> con_labels;
      for(int cl = 0; cl < label_consolidate_param.label_size(); ++ cl) {
        con_labels.push_back(label_consolidate_param.label(cl));
      }
      image_processor.SetLabelConsolidateParams(preprocessor_param.has_label_consolidate(), con_labels);
    }

    if(preprocessor_param.has_histeq()) {
      PrepHistEqParam histeq_param = preprocessor_param.histeq();
      std::vector<float> label_boost(nr_labels, 1.0);
      for(int i = 0; i < histeq_param.label_boost().size(); ++i) {
        label_boost[i] = histeq_param.label_boost().Get(i);
      }
      image_processor.SetLabelHistEqParams(true, histeq_param.has_patch_prior()&&histeq_param.patch_prior(), histeq_param.has_masking()&&histeq_param.masking(), label_boost);
    }

    if(preprocessor_param.has_crop()) {
      PrepCropParam crop_param = preprocessor_param.crop();
      image_processor.SetCropParams(crop_param.has_imagecrop()?crop_param.imagecrop():0, crop_param.has_labelcrop()?crop_param.labelcrop():0);
    }

    if(preprocessor_param.has_clahe()) {
      PrepClaheParam clahe_param = preprocessor_param.clahe();
      image_processor.SetClaheParams(true, clahe_param.has_clip()?clahe_param.clip():4.0);
    }

    if(preprocessor_param.has_blur()) {
      PrepBlurParam blur_param = preprocessor_param.blur();
      image_processor.SetBlurParams(true, blur_param.has_mean()?blur_param.mean():0.0, blur_param.has_std()?blur_param.std():0.1, blur_param.has_ksize()?blur_param.ksize():5);
    }

  }

  if(!(input_param.has_raw_images() && input_param.has_label_images())) {
    LOG(FATAL) << "Raw images or label images folder missing.";
  }

  std::set<std::string> filetypes = CreateImageTypesSet();

  int error;
  std::vector<std::vector<bofs::path>> training_set = LoadTrainingSetItems(filetypes, input_param.raw_images(),input_param.label_images(),&error);

  unsigned int ijsum = 0;
  // Preload and preprocess all images
  for (unsigned int i = 0; i < training_set.size(); ++i) {
    std::vector<bofs::path> training_item = training_set[i];

    std::vector<cv::Mat> raw_stack;
    std::vector<std::vector<cv::Mat>> labels_stack(training_item.size() - 1);

    std::string type = bofs::extension(training_item[0]);
    std::transform(type.begin(), type.end(), type.begin(), ::tolower);

    if(type == ".tif" || type == ".tiff") {
      // TIFF and multipage TIFF mode
      raw_stack = LoadTiff(training_item[0].string(), nr_channels);
    } else {
      // All other image types
      cv::Mat raw_image = cv::imread(training_item[0].string(), nr_channels == 1 ? CV_LOAD_IMAGE_GRAYSCALE :
          CV_LOAD_IMAGE_COLOR);
      raw_stack.push_back(raw_image);
    }

    for(unsigned int k = 0; k < training_item.size() - 1; ++k) {
      std::string type = bofs::extension(training_item[k+1]);
      std::transform(type.begin(), type.end(), type.begin(), ::tolower);
      if(type == ".tif" || type == ".tiff") {
        std::vector<cv::Mat> label_stack = LoadTiff(training_item[k+1].string(), 1);
        labels_stack[k] = label_stack;
      }
      else {
        std::vector<cv::Mat> label_stack;
        cv::Mat label_image = cv::imread(training_item[k+1].string(), CV_LOAD_IMAGE_GRAYSCALE);
        label_stack.push_back(label_image);
        labels_stack[k] = label_stack;
      }
    }

    for (unsigned int j = 0; j < raw_stack.size(); ++j) {
      std::vector<cv::Mat> label_images;
      for(unsigned int k = 0; k < labels_stack.size(); ++k) {
        label_images.push_back(labels_stack[k][j]);
      }

      if(label_images.size() > 1 && nr_labels != 2 && label_images.size() < nr_labels) {
        // Generate complement label
        cv::Mat clabel(label_images[0].rows, label_images[0].cols, CV_8UC(1), 255.0);
        for(unsigned int k = 0; k < label_images.size(); ++k) {
          cv::subtract(clabel,label_images[k],clabel);
        }
        label_images.push_back(clabel);
      }

      image_processor.SubmitImage(raw_stack[j], ijsum, label_images);
      ++ijsum;
    }
  }

  image_processor.Init();

  std::vector<long> labelcounter(nr_labels + 1);

  int train_iters = solver_param.has_max_iter()?solver_param.max_iter():0;

  // Do the training
  for (int i = 0; i < train_iters; ++i) {
    std::vector<cv::Mat> patch = image_processor.DrawPatchRandom();

    std::vector<cv::Mat> images;
    std::vector<cv::Mat> labels;

    images.push_back(patch[0]);
    labels.push_back(patch[1]);

    // TODO: Only enable in debug or statistics mode
    for (int y = 0; y < patch_size; ++y) {
      for (int x = 0; x < patch_size; ++x) {
        labelcounter[patch[1].at<float>(y, x) + 1] += 1;
      }
    }

    if(settings.debug) {
      for (unsigned int k = 0; k < nr_labels + 1; ++k) {
        std::cout << "Label: " << k << ", " << labelcounter[k] << std::endl;
      }
    }

    if(settings.graphic) {

      cv::Mat test;

      double minVal, maxVal;
      cv::minMaxLoc(patch[1], &minVal, &maxVal);
      patch[1].convertTo(test, CV_32FC1, 1.0 / (maxVal - minVal),
          -minVal * 1.0 / (maxVal - minVal));

      std::vector<cv::Mat> tv;
      tv.push_back(test);
      tv.push_back(test);
      tv.push_back(test);
      cv::Mat tvl;

      cv::merge(tv, tvl);

      cv::Mat patchclone = patch[0].clone();

      tvl.copyTo(
          patchclone(
              cv::Rect(padding_size / 2, padding_size / 2, patch_size,
                  patch_size)));

      cv::imshow(OCVDBGW, patchclone);
      cv::waitKey(10);
    }

    // The labels
    std::vector<int> lalabels;
    lalabels.push_back(0);
    boost::dynamic_pointer_cast<caffe::MemoryDataLayer<float>>(
        train_net->layers()[0])->AddMatVector(labels, lalabels);

    // The images
    std::vector<int> imlabels;
    imlabels.push_back(0);
    boost::dynamic_pointer_cast<caffe::MemoryDataLayer<float>>(
        train_net->layers()[1])->AddMatVector(images, imlabels);

    solver->Step(1);

    if(test_interval > -1 && i % test_interval == 0) {
      // TODO: Run tests with the testset and testnet
      // TODO: Apply ISBI and other quality measures (cross, rand, pixel, warp, loss)
      // TODO: Write out statistics to file
    }
  }

  LOG(INFO) << "Training done!";

  return 0;
}