Example #1
void cv::viz::vtkTrajectorySource::SetTrajectory(InputArray _traj)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);
    CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

    Mat traj;
    _traj.getMat().convertTo(traj, CV_64F);
    const Affine3d* dpath = traj.ptr<Affine3d>();
    size_t total = traj.total();

    points = vtkSmartPointer<vtkPoints>::New();
    points->SetDataType(VTK_DOUBLE);
    points->SetNumberOfPoints((vtkIdType)total);

    tensors = vtkSmartPointer<vtkDoubleArray>::New();
    tensors->SetNumberOfComponents(9);
    tensors->SetNumberOfTuples((vtkIdType)total);

    for(size_t i = 0; i < total; ++i, ++dpath)
    {
        Matx33d R = dpath->rotation().t();       // transposed: VTK tensors are column-major
        tensors->SetTuple((vtkIdType)i, R.val);

        Vec3d p = dpath->translation();
        points->SetPoint((vtkIdType)i, p.val);
    }
}
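For reference, the trajectory this setter accepts is just a contiguous array of 4x4 double poses. A minimal sketch of building one (makeToyTrajectory is a hypothetical helper; assumes OpenCV's viz module, which defines DataType<Affine3d>):

static cv::Mat makeToyTrajectory()
{
    std::vector<cv::Affine3d> traj;
    traj.push_back(cv::Affine3d::Identity());
    // small rotation (rvec) plus a translation along x
    traj.push_back(cv::Affine3d(cv::Vec3d(0, 0, 0.1), cv::Vec3d(0.5, 0, 0)));
    cv::Mat m(traj, true);  // a vector of Affine3d viewed as a Mat is CV_64FC(16)
    CV_Assert(m.type() == CV_64FC(16));
    return m;
}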
Example #2
void
mexFunction(int nlhs, mxArray *plhs[],
            int nrhs, const mxArray *prhs[])
{
    // default parameters
    int ksize = 3;
    double sigma = 2.0;

    //validate input
    if (nrhs == 0)
    {
        mexErrMsgTxt("An image is required!");
    }
    if (nlhs != 1)
    {
        mexErrMsgTxt("Only one output is provided.");
    }
    if(!mxIsDouble(prhs[0]) || ((mxGetNumberOfDimensions(prhs[0]) != 3) && (mxGetNumberOfDimensions(prhs[0]) != 2)))
    {
        mexErrMsgTxt("Image must be a 2-D or 3-D array of type double.");
    }
    if((nrhs >= 2)  && ((!mxIsDouble(prhs[1])) || (mxGetScalar(prhs[1]) <= 0)))
    {
        mexErrMsgTxt("ksize has to be a positive integer.");
    } 
    else if (nrhs >= 2)
    {
        ksize = (int) mxGetScalar(prhs[1]);
    }
    if((nrhs >= 3)  && ((!mxIsDouble(prhs[2])) || (mxGetScalar(prhs[2]) <= 0)))
    {
        mexErrMsgTxt("sigma has to be a positive value.");
    } 
    else if (nrhs >= 3)
    {
        sigma = (double) mxGetScalar(prhs[2]);
    }
    
    // determine input/output image properties
    const mwSize *dims = mxGetDimensions(prhs[0]);
    const mwSize nDims = mxGetNumberOfDimensions(prhs[0]);
    const int rows     = (int)dims[0];
    const int cols     = (int)dims[1];
    const int channels = (nDims == 3 ? (int)dims[2] : 1);
    
    // Allocate, copy, and convert the input image
    // @note: input is double
    cv::Mat image = cv::Mat::zeros(cv::Size(cols, rows), CV_64FC(channels));
    om::copyMatrixToOpencv(mxGetPr(prhs[0]), image);
    image.convertTo(image, CV_8U, 255);
    
    // Call OpenCV functions here and do the magic
    cv::Mat out = cv::Mat::zeros(cv::Size(cols, rows), CV_8UC(channels));
    cv::GaussianBlur(image,out,cv::Size(ksize,ksize),sigma);
    
    // Convert opencv to Matlab and set as output
    // @note: output is uint8
    plhs[0] = mxCreateNumericArray(nDims, dims, mxUINT8_CLASS, mxREAL);
    om::copyMatrixToMatlab<unsigned char>(out, (unsigned char*)mxGetPr(plhs[0]));
}
Example #3
/**
* Convert gdal type to opencv type
*/
int KGDAL2CV::gdal2opencv(const GDALDataType& gdalType, const int& channels){

	switch (gdalType){

		/// UInt8
	case GDT_Byte:
		if (channels == 1){ return CV_8UC1; }
		if (channels == 3){ return CV_8UC3; }
		if (channels == 4){ return CV_8UC4; }
		return CV_8UC(channels);

		/// UInt16
	case GDT_UInt16:
		if (channels == 1){ return CV_16UC1; }
		if (channels == 3){ return CV_16UC3; }
		if (channels == 4){ return CV_16UC4; }
		return CV_16UC(channels);

		/// Int16
	case GDT_Int16:
		if (channels == 1){ return CV_16SC1; }
		if (channels == 3){ return CV_16SC3; }
		if (channels == 4){ return CV_16SC4; }
		return CV_16SC(channels);

		/// UInt32 / Int32 (OpenCV has no 32-bit unsigned type, so both map to CV_32S)
	case GDT_UInt32:
	case GDT_Int32:
		if (channels == 1){ return CV_32SC1; }
		if (channels == 3){ return CV_32SC3; }
		if (channels == 4){ return CV_32SC4; }
		return CV_32SC(channels);

	case GDT_Float32:
		if (channels == 1){ return CV_32FC1; }
		if (channels == 3){ return CV_32FC3; }
		if (channels == 4){ return CV_32FC4; }
		return CV_32FC(channels);

	case GDT_Float64:
		if (channels == 1){ return CV_64FC1; }
		if (channels == 3){ return CV_64FC3; }
		if (channels == 4){ return CV_64FC4; }
		return CV_64FC(channels);

	default:
		std::cerr << "Unknown GDAL Data Type" << std::endl;
		std::cerr << "Type: " << GDALGetDataTypeName(gdalType) << std::endl;
		return -1;
	}

	return -1;
}
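A plausible call site for this mapping, sizing a cv::Mat for a GDAL dataset (allocateForRaster is a hypothetical helper; assumes KGDAL2CV is default-constructible):

#include <gdal_priv.h>
#include <opencv2/core/core.hpp>

static cv::Mat allocateForRaster(const char* path)
{
    GDALAllRegister();
    GDALDataset* ds = static_cast<GDALDataset*>(GDALOpen(path, GA_ReadOnly));
    CV_Assert(ds != NULL);
    KGDAL2CV converter;
    // map (GDAL pixel type, band count) to an OpenCV type; -1 means unsupported
    int cvType = converter.gdal2opencv(ds->GetRasterBand(1)->GetRasterDataType(),
                                       ds->GetRasterCount());
    CV_Assert(cvType != -1);
    cv::Mat image(ds->GetRasterYSize(), ds->GetRasterXSize(), cvType);
    GDALClose(ds);
    return image;
}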
Example #4
void cv::viz::writeTrajectory(InputArray _traj, const String& files_format, int start, const String& tag)
{
    if (_traj.kind() == _InputArray::STD_VECTOR_MAT)
    {
#if CV_MAJOR_VERSION < 3
        std::vector<Mat>& v = *(std::vector<Mat>*)_traj.obj;
#else
        std::vector<Mat>& v = *(std::vector<Mat>*)_traj.getObj();
#endif

        for(size_t i = 0, index = max(0, start); i < v.size(); ++i, ++index)
        {
            Affine3d affine;
            Mat pose = v[i];
            CV_Assert(pose.type() == CV_32FC(16) || pose.type() == CV_64FC(16));
            pose.copyTo(affine.matrix);
            writePose(cv::format(files_format.c_str(), index), affine, tag);
        }
        return;
    }

    if (_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT)
    {
        CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

        Mat traj = _traj.getMat();

        if (traj.depth() == CV_32F)
            for(size_t i = 0, index = max(0, start); i < traj.total(); ++i, ++index)
                writePose(cv::format(files_format.c_str(), index), traj.at<Affine3f>((int)i), tag);

        if (traj.depth() == CV_64F)
            for(size_t i = 0, index = max(0, start); i < traj.total(); ++i, ++index)
                writePose(cv::format(files_format.c_str(), index), traj.at<Affine3d>((int)i), tag);
        return;
    }

    CV_Error(Error::StsError, "Unsupported array kind");
}
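Since files_format is forwarded to cv::format, a printf-style pattern produces one file per pose. A hypothetical call, with traj holding CV_64FC(16) poses as above:

// writes pose00000.xml, pose00001.xml, ... starting at index 0, under tag "pose"
cv::viz::writeTrajectory(traj, "pose%05d.xml", 0, "pose");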
Example #5
Mat SurfFeature::FeatureEvaluate(const Mat &_sumImg, float _scale)
{
	CV_Assert( _scale >= 1 );
	CV_Assert( _sumImg.type() == CV_64FC(8) );
	CV_Assert( feature != Rect() );

	int tx = ((int)((feature.x + 1) * _scale + 0.5) - 1);
	int ty = ((int)((feature.y + 1) * _scale + 0.5) - 1);
	int bx = (int)(feature.br().x * _scale + 0.5);
	int by = (int)(feature.br().y * _scale + 0.5);

	Rect scaleFst(Point(tx,ty),Point(bx,by));
	if( scaleFst.br().x >= _sumImg.cols || scaleFst.br().y >= _sumImg.rows )
		CV_Error(CV_StsOutOfRange,"Scaled feature size is larger than the given window size!");

	typedef Vec<double,8> SumType;

	Point mid((int)(scaleFst.x + scaleFst.width * 0.5 + 0.5),
		(int)(scaleFst.y + scaleFst.height * 0.5 + 0.5));

	Vec<double,8> res[4];
	res[0] = _sumImg.at<SumType>(mid) + _sumImg.at<SumType>(scaleFst.tl())
		- _sumImg.at<SumType>(mid.y, scaleFst.x) - _sumImg.at<SumType>(scaleFst.y, mid.x);

	res[1] = _sumImg.at<SumType>(mid.y,scaleFst.br().x) + _sumImg.at<SumType>(scaleFst.y,mid.x)
		- _sumImg.at<SumType>(mid) -  _sumImg.at<SumType>(scaleFst.y,scaleFst.br().x);

	res[2] = _sumImg.at<SumType>(scaleFst.br().y,mid.x) + _sumImg.at<SumType>(mid.y,scaleFst.x)
		- _sumImg.at<SumType>(mid) - _sumImg.at<SumType>(scaleFst.br().y,scaleFst.x);


	res[3] = _sumImg.at<SumType>(scaleFst.br()) + _sumImg.at<SumType>(mid)
		- _sumImg.at<SumType>(mid.y,scaleFst.br().x) - _sumImg.at<SumType>(scaleFst.br().y, mid.x);

	double sumRes = 1e-10;
	Mat result(FEATURE_SIZE, 1, CV_64FC1);

	for(int i = 0; i < 4; i++)
	{
		for(int j = 0; j < 8; j++)
		{
			result.at<double>(i * 8 + j,0) = res[i][j];
			sumRes += res[i][j]*res[i][j];
		}
	}

	CV_Assert(sumRes != 0);
	result = result / cv::sqrt(sumRes);

	return result;
}
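The CV_64FC(8) argument is an 8-channel summed-area (integral) image. One plausible way to produce it, assuming an 8-channel response map resp (e.g. SURF-like filter responses) already exists:

static cv::Mat makeSumImage(const cv::Mat& resp)  // resp: H x W, 8 channels
{
    cv::Mat sumImg;
    cv::integral(resp, sumImg, CV_64F);  // (H+1) x (W+1), type CV_64FC(8)
    CV_Assert(sumImg.type() == CV_64FC(8));
    return sumImg;
}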
Example #6
void ColorTransformer::Apply(size_t id, cv::Mat &mat)
{
    UNUSED(id);

    if (m_curBrightnessRadius == 0 && m_curContrastRadius == 0 && m_curSaturationRadius == 0)
        return;

    if (mat.type() == CV_64FC(mat.channels()))
        Apply<double>(mat);
    else if (mat.type() == CV_32FC(mat.channels()))
        Apply<float>(mat);
    else
        RuntimeError("Unsupported type");
}
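Worth noting: CV_32FC(n) and CV_64FC(n) rebuild a full type from depth plus channel count, so the comparisons above reduce to a depth check. A small sketch making the equivalence explicit (valid for any Mat):

static void checkDepthEquivalence(const cv::Mat& mat)
{
    // CV_64FC(mat.channels()) == CV_MAKETYPE(CV_64F, mat.channels()),
    // so matching the full type is the same as matching the depth
    CV_Assert((mat.type() == CV_64FC(mat.channels())) == (mat.depth() == CV_64F));
    CV_Assert((mat.type() == CV_32FC(mat.channels())) == (mat.depth() == CV_32F));
}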
Example #7
void IntensityTransformer::Apply(size_t id, cv::Mat &mat)
{
    UNUSED(id);

    if (m_eigVal.empty() || m_eigVec.empty() || m_curStdDev == 0)
        return;

    if (mat.type() == CV_64FC(mat.channels()))
        Apply<double>(mat);
    else if (mat.type() == CV_32FC(mat.channels()))
        Apply<float>(mat);
    else
        RuntimeError("Unsupported type");
}
Example #8
void ColorTransformer::Apply(uint8_t, cv::Mat &mat)
{
    if (m_brightnessRadius == 0.0 && m_contrastRadius == 0.0 && m_saturationRadius == 0.0)
        return;

    // Have to convert to float
    ConvertToFloatingPointIfRequired(mat);

    if (mat.type() == CV_64FC(mat.channels()))
        Apply<double>(mat);
    else if (mat.type() == CV_32FC(mat.channels()))
        Apply<float>(mat);
    else
        RuntimeError("Unsupported type");
}
Example #9
void IntensityTransformer::Apply(uint8_t, cv::Mat &mat)
{
    if (m_eigVal.empty() || m_eigVec.empty() || m_stdDev == 0.0)
        return;

    // Have to convert to float.
    int type = m_precision == DataType::Float ? CV_32F : CV_64F;
    if (mat.type() != type)
        mat.convertTo(mat, type);

    if (mat.type() == CV_64FC(mat.channels()))
        Apply<double>(mat);
    else if (mat.type() == CV_32FC(mat.channels()))
        Apply<float>(mat);
    else
        RuntimeError("Unsupported type");
}
Example #10
cv::Mat cv::viz::vtkTrajectorySource::ExtractPoints(InputArray _traj)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);
    CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

    Mat points(1, (int)_traj.total(), CV_MAKETYPE(_traj.depth(), 3));
    const Affine3d* dpath = _traj.getMat().ptr<Affine3d>();
    const Affine3f* fpath = _traj.getMat().ptr<Affine3f>();

    if (_traj.depth() == CV_32F)
        for(int i = 0; i < points.cols; ++i)
            points.at<Vec3f>(i) = fpath[i].translation();

    if (_traj.depth() == CV_64F)
        for(int i = 0; i < points.cols; ++i)
            points.at<Vec3d>(i) = dpath[i].translation();

    return points;
}
Example #11
void BagOfWordsSlic::ComputeVisualWordHistograms(int half_window_height, int half_window_width, const Mat& visual_word_map) {
    // Build a per-channel summed-area table of visual-word occurrence counts.
    Mat accumulated_arrays(visual_word_map.size(), CV_32SC(KCENTERS));
    const unsigned short* visual_word_map_row;
    Vec50s* this_row;
    Vec50s* prev_row;
    int upper_val, left_val, upper_left_val;
    for(int i = 0; i < visual_word_map.rows; ++i) {
        visual_word_map_row = visual_word_map.ptr<unsigned short>(i);
        this_row = accumulated_arrays.ptr<Vec50s>(i);
        if(i > 0) prev_row = accumulated_arrays.ptr<Vec50s>(i - 1);
        for(int j = 0; j < visual_word_map.cols; ++j)
            for(int k = 0; k < KCENTERS; ++k) {
                upper_val = i > 0 ? prev_row[j][k] : 0;
                left_val = j > 0 ? this_row[j-1][k] : 0;
                upper_left_val = (i > 0 && j > 0) ? prev_row[j-1][k] : 0;
                this_row[j][k] = upper_val + left_val - upper_left_val + (k == visual_word_map_row[j]);
            }
    }
    // Recover each window's histogram from the table via inclusion-exclusion, then normalize by area.
    visual_word_histogram_matrix_.create(visual_word_map.size(), CV_64FC(KCENTERS));
    Vec50d* visual_word_histogram_row;
    Vec50s* window_bottom;
    Vec50s* window_top;
    int top_ind, bottom_ind, left_ind, right_ind;
    double window_area;
    for(int i = 0; i < visual_word_histogram_matrix_.rows; ++i) {
        top_ind = max(i - half_window_height - 1, 0);
        window_top = accumulated_arrays.ptr<Vec50s>(top_ind);
        bottom_ind = min(i + half_window_height, im_height_ - 1);
        window_bottom = accumulated_arrays.ptr<Vec50s>(bottom_ind);
        visual_word_histogram_row = visual_word_histogram_matrix_.ptr<Vec50d>(i);
        for(int j = 0; j < visual_word_histogram_matrix_.cols; ++j) {
            left_ind = max(0, j - half_window_width - 1);
            right_ind = min(im_width_-1, j + half_window_width);
            visual_word_histogram_row[j] = window_bottom[right_ind] + window_top[left_ind] - (window_top[right_ind] + window_bottom[left_ind]);
            window_area = (right_ind - left_ind) * (bottom_ind - top_ind);
            visual_word_histogram_row[j] /= window_area;
        }
    }
}
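The second loop applies the standard summed-area-table identity. The same inclusion-exclusion in scalar form, for a single-channel table where sat(r, c) holds the sum over rows 0..r and cols 0..c:

static int boxSum(const cv::Mat& sat, int top, int left, int bottom, int right)
{
    int s = sat.at<int>(bottom, right);
    if (top > 0)             s -= sat.at<int>(top - 1, right);
    if (left > 0)            s -= sat.at<int>(bottom, left - 1);
    if (top > 0 && left > 0) s += sat.at<int>(top - 1, left - 1);
    return s;
}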
Example #12
void SurfFaceDetection::Init()
{
	maxImgSize = Size(2000,2000);

	Mat rowKernel(1, 3, CV_32F);
	rowKernel.at<float>(0,0) = -1;
	rowKernel.at<float>(0,1) = 0;
	rowKernel.at<float>(0,2) = 1;

	Mat colKernel(3, 1, CV_32F);
	colKernel.at<float>(0,0) = -1;
	colKernel.at<float>(1,0) = 0;
	colKernel.at<float>(2,0) = 1;

	rowFilter = createLinearFilter(CV_8UC1, CV_16SC1, rowKernel, Point(-1,-1),
		0.0, BORDER_REFLECT, BORDER_REFLECT);
	colFilter = createLinearFilter(CV_8UC1, CV_16SC1, colKernel, Point(-1,-1),
		0.0, BORDER_REFLECT, BORDER_REFLECT);

	sumCache = Mat(maxImgSize + Size(1,1), CV_64FC(8));
	srcScale = 1.0;
	//img.reserve(maxImgSize);
}
Example #13
const std::vector<std::pair<int, char*> > Logger::initTypeStringMapping()
{
	std::vector<std::pair<int, char*> > imageTypeStringMapping;
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_8U, (char*) "CV_8U"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_8UC2, (char*) "CV_8UC2"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_8UC3, (char*) "CV_8UC3"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_8UC4, (char*) "CV_8UC4"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_8S, (char*) "CV_8S"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_16U, (char*) "CV_16U"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_16S, (char*) "CV_16S"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32S, (char*) "CV_32S"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32F, (char*) "CV_32F"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32FC2, (char*) "CV_32FC2"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32FC3, (char*) "CV_32FC3"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32FC4, (char*) "CV_32FC4"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32FC(5), (char*) "CV_32FC5"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_64F, (char*) "CV_64F"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_64FC2, (char*) "CV_64FC2"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_64FC3, (char*) "CV_64FC3"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_64FC4, (char*) "CV_64FC4"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_64FC(5), (char*) "CV_64FC5"));

	return imageTypeStringMapping;
}
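A hypothetical lookup over this mapping (typeToString is not part of the original Logger):

static const char* typeToString(int type,
                                const std::vector<std::pair<int, char*> >& mapping)
{
    for (size_t i = 0; i < mapping.size(); ++i)
        if (mapping[i].first == type)
            return mapping[i].second;
    return "unknown";
}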
Example #14
const std::vector<std::pair<int, char*> > Logger::initPrimitiveStringMapping()
{
	std::vector<std::pair<int, char*> > imageTypeStringMapping;
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_8U, (char*) "uchar"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_8UC2, (char*) "cv::Vec2b"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_8UC3, (char*) "cv::Vec3b"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_8UC4, (char*) "cv::Vec4b"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_8S, (char*) "char"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_16U, (char*) "ushort"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_16S, (char*) "short"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32S, (char*) "int"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32F, (char*) "float"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32FC2, (char*) "cv::Vec2f"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32FC3, (char*) "cv::Vec3f"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32FC4, (char*) "cv::Vec4f"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_32FC(5), (char*) "cv::Vec<float, 5>"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_64F, (char*) "double"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_64FC2, (char*) "cv::Vec2d"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_64FC3, (char*) "cv::Vec3d"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_64FC4, (char*) "cv::Vec4d"));
	imageTypeStringMapping.push_back(std::pair<int, char*>(CV_64FC(5), (char*) "cv::Vec<double, 5>"));

	return imageTypeStringMapping;
}
Example #15
        void TensorBoardFileWriter::WriteImage(const std::wstring& name, NDArrayViewPtr imageData, uint64_t step)
        {
            assert(imageData != nullptr);
            tensorflow::Event event;
            event.set_wall_time(static_cast<double>(std::time(0)));
            tensorflow::Summary* summary = event.mutable_summary();

            std::vector<size_t> dimensions = imageData->Shape().Dimensions();
            const size_t batch_size = dimensions.at(3);
            const size_t depth = dimensions.at(2);
            const size_t width = dimensions.at(1);
            const size_t height = dimensions.at(0);
            const DataType dtype = imageData->GetDataType();

            std::vector<size_t> start(4, 0);
            std::vector<size_t> extent;
            extent.push_back(height);
            extent.push_back(width);
            extent.push_back(depth);
            extent.push_back(1);
            
            const std::vector<size_t> imageDim({height, width, depth});
            NDShape imageShape(imageDim);

            for (size_t i = 0; i < batch_size; i++) {
                tensorflow::Summary::Value* summaryValue = summary->add_value();
                summaryValue->set_tag(ToString(name) + "/image/" + std::to_string(i));

                tensorflow::Summary::Image* summaryImage = summaryValue->mutable_image();
                summaryImage->set_height(height);
                summaryImage->set_width(width);
                summaryImage->set_colorspace(depth);
                start.back() = static_cast<size_t>(i);
                auto image = imageData->SliceView(start, extent)->AsShape(imageDim);
                vector<uchar> buffer;

                switch (dtype)
                {
                case DataType::Float:
                    WriteImageToBuffer(image->WritableDataBuffer<float>(), height, width, CV_32FC(depth), buffer);
                    break;
                
                case DataType::Double:
                    WriteImageToBuffer(image->WritableDataBuffer<double>(), height, width, CV_64FC(depth), buffer);
                    break;

                default:
                    fprintf(stderr, "TensorBoardFileWriter: Unsupported data type: %d ", static_cast<int>(dtype));
                    break;
                }

                string str(buffer.begin(), buffer.end());
                summaryImage->set_encoded_image_string(str);
            }
            
            WriteRecord(Serialize(event));
        }
Example #16
void 
mexFunction(int nlhs, mxArray *plhs[], 
            int nrhs, const mxArray *prhs[])
{
	// simple handling of the expected input arguments from matlab
	
	// these checks would ideally be dependent on your code functionality
    if (nrhs == 0)
    {
        mexErrMsgTxt("An image is required!");
    }
    
    if(mxGetNumberOfDimensions(prhs[0]) != 3) 
    {
        mexErrMsgTxt("Input image should be colored.");
    }

    if(!mxIsDouble(prhs[0]))
    {
        mexErrMsgTxt("Input image should be of Double type.");
    }
    if(nrhs > 1)
    {
        mexErrMsgTxt("Only one input argument is required.");
    } 
    
    // determine input image properties
    const mwSize *dims = mxGetDimensions(prhs[0]);
    const mwSize nDims = mxGetNumberOfDimensions(prhs[0]);
    const int rows     = (int)dims[0];
    const int cols     = (int)dims[1];
    const int channels = (nDims == 3 ? (int)dims[2] : 1);
    
    // Allocate, copy, and convert the input image
    // @note: input is double
    cv::Mat inImage = cv::Mat::zeros(cv::Size(cols, rows), CV_64FC(channels));
    mexPrintf("Got the Image \n");
    
	// use the helper library to copy the data from input mxArray to cv::Mat
	om::copyMatrixToOpencv(mxGetPr(prhs[0]), inImage);
    mexPrintf("Converted to Mat \n");
    inImage.convertTo(inImage, CV_8U, 255);  // MATLAB doubles are in [0,1]; scale to 0..255
	// create a single channel Matrix to store the output grayscale image
    cv::Mat outImage = cv::Mat::zeros(rows, cols, CV_8UC1);
    
	// convert the image to grayscale
	cv::cvtColor(inImage, outImage, CV_RGB2GRAY);
    mexPrintf("Applied the operations \n");

	mwSize outDims[2];
	outDims[0] = outImage.rows;
	outDims[1] = outImage.cols;
	mwSize outNDims = 2;

    // Convert opencv to Matlab and set as output
    plhs[0] = mxCreateNumericArray(outNDims, outDims, mxUINT8_CLASS, mxREAL);
    
	// use the helper library to copy the cv::Mat data to mxArray for return
	om::copyMatrixToMatlab<unsigned char>(outImage, (unsigned char*)mxGetPr(plhs[0]));
    mexPrintf("Converted to mxArry for return \n");
}
Example #17
void GraphCut::generateGMMProbability(){
    const int kMeansItCount = 10;
    const int kMeansType = KMEANS_PP_CENTERS;

    this->generateInitGuessMask();
    std::vector<std::vector<cv::Vec3f> > samples;
    // declare a sample vector for each class
    for(int i = 0; i < this->CLASS_NUMBER; i++){
        std::vector<cv::Vec3f> singleSample;
        samples.push_back(singleSample);
    }
    // assign each pixel to the sample set of its initial-guess class
    for(int y_offset = 0; y_offset < this->rawImage.rows; y_offset++){
        for(int x_offset = 0; x_offset < this->rawImage.cols; x_offset++){
            int currentGuessMask = this->initGuessMask.at<int>(y_offset,x_offset);
            samples.at(currentGuessMask).push_back((cv::Vec3f)this->rawImage.at<cv::Vec3b>(y_offset,x_offset));
        }
    }
    std::cout << "添加sample结束" << std::endl;
    //验证每个sample都不为空
    for(std::vector<cv::Vec3f> elem : samples){
        assert(elem.empty() == false);
    }

    // assign a component label to every member of each sample set via kmeans
    std::vector<cv::Mat > mat_samples;
    std::vector<cv::Mat > mat_samples_label;
    for(int i = 0; i < this->CLASS_NUMBER; i++){
        cv::Mat mat_sample((int)samples.at(i).size(), 3, CV_32F, &samples.at(i)[0][0] );
        cv::Mat label;
        cv::kmeans( mat_sample, GMM::componentsCount, label,TermCriteria( CV_TERMCRIT_ITER, kMeansItCount, 0.0), 0, kMeansType );
        mat_samples.push_back(mat_sample);
        mat_samples_label.push_back(label);
    }
    std::cout << "kmeans结束" << std::endl;

    this->GMMProbability = cv::Mat(this->rawImage.rows, this->rawImage.cols, CV_64FC(this->CLASS_NUMBER));
    std::vector<cv::Mat> GMMModels;
    std::vector<GMM> GMMs;
    // initialize one GMM per class
    for(int i = 0; i < this->CLASS_NUMBER; i++){
        cv::Mat tempModel;
        GMM tempGMM(tempModel);
        tempGMM.initLearning();
        GMMModels.push_back(tempModel);
        GMMs.push_back(tempGMM);
    }

    // verify each sample set has as many labels as samples
    for(int i = 0; i < this->CLASS_NUMBER; i++){
        assert(samples.at(i).size() == static_cast<size_t>(mat_samples_label.at(i).rows));
    }

    // feed every sample into its GMM
    for(int i = 0; i < this->CLASS_NUMBER; i++){
        for(size_t sampleIndex = 0; sampleIndex < samples.at(i).size(); sampleIndex++){
            GMMs.at(i).addSample(mat_samples_label.at(i).at<int>(sampleIndex,0), samples.at(i).at(sampleIndex));
        }
    }
    // finish GMM training
    for(int i = 0; i < this->CLASS_NUMBER; i++){
        GMMs.at(i).endLearning();
    }
    std::cout << "GMM训练结束" << std::endl;

    // fill GMMProbability: channel i holds the probability that the pixel belongs to class i
    for(int y_offset = 0; y_offset < this->GMMProbability.rows; y_offset++){
        for(int x_offset = 0; x_offset < this->GMMProbability.cols; x_offset++){
            cv::Vec3f currentVec3f = (cv::Vec3f)this->rawImage.at<cv::Vec3b>(y_offset,x_offset);
            for(int i = 0; i < this->CLASS_NUMBER; i++){
                // NOTE: Vec<double,3> assumes CLASS_NUMBER == 3, matching CV_64FC(this->CLASS_NUMBER) above
                this->GMMProbability.at<cv::Vec<double,3> >(y_offset,x_offset)[i] = GMMs.at(i)(currentVec3f);
            }
        }
    }
    std::cout << "GMMProbability赋值结束" << std::endl;

}
Example #18
void BagOfWordsSlic::GenerateSuperpixels(Mat _edges, InputArray _input_image, OutputArray _superpixels,
        int _number_of_superpixels, InputArray _visual_word_map,
        InputArray _mask, OutputArray _superpixel_centroids) {

    cout << "generating super pixels" << endl;
    /*--Convert from input arguments to class data-structures--*/
    input_image_ = _input_image.getMat();
    Mat visual_word_map = _visual_word_map.getMat();
    Mat mask = _mask.getMat();

    cvtColor(mask, mask, CV_BGR2GRAY);
    im_height_ = input_image_.rows;
    im_width_ = input_image_.cols;
    num_superpixels_ = _number_of_superpixels;

    // Compute step size
    S_ = sqrt(im_height_*im_width_/num_superpixels_);

    /*--Get Lab Image--*/
    Mat lab_image(input_image_.size(), CV_8UC3);
    cvtColor(input_image_, lab_image, COLOR_BGR2Lab);

    /*--Initialize centroid locations to regular grid with computed step size--*/
    int grid_width = ceil((float)im_width_/S_);
    int grid_height = ceil((float)im_height_/S_);
    num_superpixels_  = grid_width*grid_height;
    image_oversegmentation_ = new Oversegmentation(input_image_);
    cluster_centroids_.resize(num_superpixels_);

    for(int i = 0; i < (int)cluster_centroids_.size(); ++i) {
        int offset = 0;
        if ((i/grid_width)%2 == 0)
            offset = S_/4;
        else
            offset = 3*S_/4;
        // 0-based grid indexing; clamp so the seed never leaves the image
        int x = min(offset + (i%grid_width)*S_, im_width_ - 1);
        int y = min(offset + (i/grid_width)*S_, im_height_ - 1);
        int s_ = ceil((float)S_/2);

        if ((int)(mask.at<uchar>(y,x)) == 1
                && mask.at<uchar>(min(y+s_, im_height_ - 1), min(x+s_, im_width_ - 1)) == 1
                && mask.at<uchar>(max(y-s_, 0), max(x-s_, 0)) == 1
                && mask.at<uchar>(max(y-s_, 0), min(x+s_, im_width_ - 1)) == 1
                && mask.at<uchar>(min(y+s_, im_height_ - 1), max(x-s_, 0)) == 1) {
            cluster_centroids_[i].pt_.x = x;
            cluster_centroids_[i].pt_.y = y;
        }
    }

    // Find local minimum of gradient magnitude and adjust centroid locations
    MoveCentroidsToLocalGradientMinima();

    for(int i = 0; i < cluster_centroids_.size(); ++i)
        image_oversegmentation_->AddNewSegmentAt(cluster_centroids_[i].pt_);

    if (visual_word_map.empty()) {
        visual_word_histogram_matrix_.create(input_image_.size(),CV_64FC(KCENTERS));
    } else {
        //Compute visual_word histograms
        ComputeVisualWordHistograms(5,5,visual_word_map);
    }

    /*---Generate descriptors at centroid locations---*/

    // Get L,A,B vector for each centroid
    for(int i = 0; i < cluster_centroids_.size(); ++i) {
        cluster_centroids_[i].lab_color_  = lab_image.at<Vec3b>(image_oversegmentation_->SegmentCentroid(i));
        cluster_centroids_[i].visual_word_histogram_ = visual_word_histogram_matrix_.at<Vec50d>(image_oversegmentation_->SegmentCentroid(i));
    }

    /*--Initialize distance to nearest centroid for each pixel--*/
    distance_matrix_.create(input_image_.size(), CV_64F);
    double dist;
    int* superpixel_label_matrix_row;
    Vec50d* visual_word_histogram_row;
    double* distance_matrix_row;
    Vec3b* lab_image_row;

    /*---First loop starts: for iter = 1 to kMaxIter---*/
    for(int iter = 0; iter < kMaxIter; ++iter) {

        /* Reset distances */
        distance_matrix_.setTo(DBL_MAX);

        cout << "EM iteration: " << iter << "\n";
        cout << "Number of segments now:   " << image_oversegmentation_->NumberOfSegments() << "\n";

        int x_lower_limit, x_upper_limit, y_lower_limit, y_upper_limit;
        /*---Second loop starts: for cInd = 1 to num_superpixels_---*/
        for(int i = 0; i < num_superpixels_; ++i) {
            int centroid_x = cluster_centroids_[i].pt_.x, centroid_y = cluster_centroids_[i].pt_.y;
            /*---Third loop starts: Iterate through each pixel in 2S+1 x 2S+1 window size around centroid[i]---*/
            y_lower_limit = max(centroid_y - S_,0);
            y_upper_limit = min(centroid_y + S_,im_height_);
            x_lower_limit = max(centroid_x - S_,0);
            x_upper_limit = min(centroid_x + S_,im_width_);

            for(int pixel_y = y_lower_limit; pixel_y < y_upper_limit; pixel_y++) {

                lab_image_row = lab_image.ptr<Vec3b>(pixel_y);
                distance_matrix_row = distance_matrix_.ptr<double>(pixel_y);
                visual_word_histogram_row = visual_word_histogram_matrix_.ptr<Vec50d>(pixel_y);
                superpixel_label_matrix_row = image_oversegmentation_->pixel_labels_.ptr<int>(pixel_y);

                for(int pixel_x = x_lower_limit; pixel_x < x_upper_limit; ++pixel_x) {

                    if (mask.at<uchar>(pixel_y, pixel_x) != 1)
                        continue;
                    //Compute the pixel's distance to centroid[i]
                    ClusterPoint pixel(Point2f(pixel_x,pixel_y), lab_image_row[pixel_x], visual_word_histogram_row[pixel_x]);
                    if (visual_word_map.empty()) {
                        dist = cluster_centroids_[i].distance_to(pixel, m_, S_, 0);
                    } else {
                        dist = cluster_centroids_[i].distance_to(pixel, m_, S_, kHistogramDistanceWeight);
                    }
                    /*---Update the superpixel[pixel] and distance[pixel] if required---*/
                    if(dist < distance_matrix_row[pixel_x]) {
                        distance_matrix_row[pixel_x] = dist;
                        superpixel_label_matrix_row[pixel_x] = i;
                    }
                }
            }
            /*---Third loop ends---*/
        }/*---Second loop ends---*/
        image_oversegmentation_->ComputeSegmentAreas();

        // Create vector of flags to indicate discarded superpixels
        vector<bool> discard_list(num_superpixels_,false);

        /*---Fourth loop: iterate through each centroid(superpixel) and count number of pixels within.
        If count is too small, mark superpixel for discarding---*/
        for(int i = 0; i < num_superpixels_; ++i) {
            if (discard_list[i] != 1) {
                discard_list[i] = image_oversegmentation_->SegmentArea(i) < kMinSuperpixelAreaThreshold;

            }
        }

        int num_discarded = 0;
        for(int i = 0; i < discard_list.size(); ++i)
            if(discard_list[i])
                ++num_discarded;

        image_oversegmentation_->DeleteSegments(discard_list);
        num_superpixels_ = image_oversegmentation_->NumberOfSegments();

        vector<Point> old_centroids = image_oversegmentation_->GetCentroids();
        UpdateClusterCentroids(lab_image);
        vector<Point> new_centroids = image_oversegmentation_->GetCentroids();

        /*---Check for convergence - if converged, then break from loop---*/
        int max_centroid_displacement = -1;
        for(int i = 0; i < num_superpixels_ ; ++i) {
            int x_difference = abs(old_centroids[i].x-new_centroids[i].x);
            int y_difference = abs(old_centroids[i].y-new_centroids[i].y);
            max_centroid_displacement = std::max(max_centroid_displacement,x_difference);
            max_centroid_displacement = std::max(max_centroid_displacement,y_difference);
        }

        cout << "max distance:  " << max_centroid_displacement << "\n";
        if (max_centroid_displacement <= kCentroidErrorThreshold) {
            RenumberEachConnectedComponent();
            RelabelSmallSegmentsToNearestNeighbor(kMinSuperpixelAreaThreshold);
            cout << "Number of segments now:   " << image_oversegmentation_->NumberOfSegments() << "\n";

            break;
        }

        /*---First loop ends---*/
    }
    image_oversegmentation_->pixel_labels_.copyTo(_superpixels);

    vector<Point> centroids = image_oversegmentation_->GetCentroids();
    _superpixel_centroids.create(centroids.size(), 2, CV_32S);
    Mat superpixel_centroids = _superpixel_centroids.getMat();

    for (int i = 0; i < centroids.size(); ++i) {
        superpixel_centroids.at<int>(i,0) = centroids[i].x;
        superpixel_centroids.at<int>(i,1) = centroids[i].y;
    }
    _input_image.copyTo(image_oversegmentation_->_original_image);
    visual_word_map.copyTo(image_oversegmentation_->Texton_image);
    _edges.copyTo(image_oversegmentation_->_edges);
    image_oversegmentation_->ListPixelsForEachSegment();
//    Mat src = _input_image.getMat();
    //cout << "where2" << endl;
//    image_oversegmentation_->ComputeSegmentFeatures(src, visual_word_map, _edges);
    image_oversegmentation_->ShowClassifiedLabelImage(mask);

    cout << "where3" << endl;
//    cout << "total num superpixels: " << centroids.size() << endl;
//    _number_of_superpixels_total = centroids.size();
    return;
    /*---Clean up image_oversegmentation_->pixel_labels_---*/

}