Example #1
template <class Archive>
void load(Archive &ar, cv::Mat &mat)
{
    int rows, cols, type;
    bool continuous;
    // Read back the header fields in the order the matching save() wrote them.
    ar &rows &cols &type &continuous;

    if (continuous)
    {
        mat.create(rows, cols, type);
        const int data_size = rows * cols * static_cast<int>(mat.elemSize());
        auto mat_data = cereal::binary_data(mat.ptr(), data_size);
        ar &mat_data;
    }

    else
    {
        mat.create(rows, cols, type);
        const int row_size = cols * static_cast<int>(mat.elemSize());

        for (int i = 0; i < rows; i++)
        {
            auto row_data = cereal::binary_data(mat.ptr(i), row_size);
            ar &row_data;
        }
    }
}
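The load above only round-trips if it is paired with a save that writes the same fields in the same order. A minimal sketch of that counterpart, assuming the same cereal archive conventions as the snippet:

template <class Archive>
void save(Archive &ar, const cv::Mat &mat)
{
    int rows = mat.rows, cols = mat.cols, type = mat.type();
    bool continuous = mat.isContinuous();
    ar &rows &cols &type &continuous;

    if (continuous)
    {
        // One contiguous block: rows * cols * bytes per element.
        const int data_size = rows * cols * static_cast<int>(mat.elemSize());
        ar &cereal::binary_data(mat.ptr(), data_size);
    }
    else
    {
        // Rows may be padded, so serialize each row's payload separately.
        const int row_size = cols * static_cast<int>(mat.elemSize());
        for (int i = 0; i < rows; i++)
            ar &cereal::binary_data(mat.ptr(i), row_size);
    }
}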
Example #2
bool TrainProcessor::fillSamples(const std::string &inputName, cv::Mat &samples, cv::Mat &categories) {
	FILE* f = fopen(inputName.c_str(), "rt");
	if (f == 0) {
		return false;
	}

	cv::Mat image, gray, resized, resizedFloat;

	std::vector<cv::Mat> imagesVector;
	std::vector<char> categoriesVector;

	char buf[1000];
	while (fgets(buf, 1000, f)) {
		int len = (int) strlen(buf);
		while (len > 0 && isspace((unsigned char)buf[len-1])) {
			len--;
		}
		buf[len] = '\0';
		std::string imageName(buf);
		char cat = category(imageName);

		std::cout << "file " << imageName.c_str() << std::endl;
		if (cat == '\0') {
			std::cout << "WARNING: no category detected" << std::endl;
			std::cout << std::endl;
			continue;
		} else if (myClassesList.find(cat) == std::string::npos) {
			std::cout << "WARNING: unknown category detected" << std::endl;
			std::cout << std::endl;
			continue;
		}
		image = cv::imread(imageName, 1);
		if (!image.empty()) {
			cv::cvtColor(image, gray, CV_BGR2GRAY);
			cv::resize(gray, resized, myFaceSize, 0, 0, cv::INTER_LINEAR);
			cv::equalizeHist(resized, resized);

			resized.convertTo(resizedFloat, CV_32FC1, 1.0 / 255);

			imagesVector.push_back(cv::Mat());
			cv::Mat &vec = imagesVector.back();
			resizedFloat.reshape(0, 1).copyTo(vec);

			categoriesVector.push_back(cat);
		} else {
			std::cout << "WARNING: unable to read file" << std::endl;
			std::cout << std::endl;
		}
	}
	fclose(f);

	samples.create((int)imagesVector.size(), myFaceSize.width * myFaceSize.height, CV_32FC1);
	categories.create((int)imagesVector.size(), 1, CV_8UC1);
	for (size_t i = 0; i < imagesVector.size(); ++i) {
		cv::Mat rowi = samples.row((int)i);
		imagesVector[i].copyTo(rowi);
		categories.at<unsigned char>((int)i, 0) = categoriesVector.at(i);
	}
	}
	return true;
}
Example #3

void Retina::_convertValarrayGrayBuffer2cvMat(const std::valarray<double> &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, cv::Mat &outBuffer)
{
	// fill output buffer with the valarray buffer
	const double *valarrayPTR=get_data(grayMatrixToConvert);
	if (!colorMode)
	{
		outBuffer.create(cv::Size(nbColumns, nbRows), CV_8U);
		for (unsigned int i=0;i<nbRows;++i)
		{
			for (unsigned int j=0;j<nbColumns;++j)
			{
				cv::Point pixel(j, i);
				outBuffer.at<unsigned char>(pixel) = (unsigned char)*(valarrayPTR++);
			}
		}
	}else
	{
		const unsigned int doubleNBpixels=_retinaFilter->getOutputNBpixels()*2;
		outBuffer.create(cv::Size(nbColumns, nbRows), CV_8UC3);
		for (unsigned int i=0;i<nbRows;++i)
		{
			for (unsigned int j=0;j<nbColumns;++j,++valarrayPTR)
			{
				cv::Point pixel(j, i);
				cv::Vec3b pixelValues;
				pixelValues[2]=(unsigned char)*(valarrayPTR);
				pixelValues[1]=(unsigned char)*(valarrayPTR+_retinaFilter->getOutputNBpixels());
				pixelValues[0]=(unsigned char)*(valarrayPTR+doubleNBpixels);

				outBuffer.at<cv::Vec3b>(pixel)=pixelValues;
			}
		}
	}
}
Example #4
File: LBSP.cpp Project: caomw/litiv
static inline void lbsp_computeImpl(const cv::Mat& oInputImg, const cv::Mat& oRefImg, const std::vector<cv::KeyPoint>& voKeyPoints,
                                    cv::Mat& oDesc, bool bSingleColumnDesc, size_t nThreshold) {
    static_assert(LBSP::DESC_SIZE==2,"bad assumptions in impl below");
    CV_DbgAssert(oRefImg.empty() || (oRefImg.size==oInputImg.size && oRefImg.type()==oInputImg.type()));
    CV_DbgAssert(oInputImg.type()==CV_8UC1 || oInputImg.type()==CV_8UC3);
    const size_t nChannels = (size_t)oInputImg.channels();
    const cv::Mat& oRefMat = oRefImg.empty()?oInputImg:oRefImg;
    CV_DbgAssert(oInputImg.isContinuous() && oRefMat.isContinuous());
    const size_t nKeyPoints = voKeyPoints.size();
    if(nChannels==1) {
        if(bSingleColumnDesc)
            oDesc.create((int)nKeyPoints,1,CV_16UC1);
        else
            oDesc.create(oInputImg.size(),CV_16UC1);
        for(size_t k=0; k<nKeyPoints; ++k) {
            const int x = (int)voKeyPoints[k].pt.x;
            const int y = (int)voKeyPoints[k].pt.y;
            LBSP::computeDescriptor<1>(oInputImg,oRefMat.at<uchar>(y,x),x,y,0,nThreshold,oDesc.at<ushort>((int)k));
        }
        return;
    }
    else { //nChannels==3
        if(bSingleColumnDesc)
            oDesc.create((int)nKeyPoints,1,CV_16UC3);
        else
            oDesc.create(oInputImg.size(),CV_16UC3);
        const std::array<size_t,3> anThreshold = {nThreshold,nThreshold,nThreshold};
        for(size_t k=0; k<nKeyPoints; ++k) {
            const int x = (int)voKeyPoints[k].pt.x;
            const int y = (int)voKeyPoints[k].pt.y;
            const uchar* acRef = oRefMat.data+oInputImg.step.p[0]*y+oInputImg.step.p[1]*x;
            LBSP::computeDescriptor(oInputImg,acRef,x,y,anThreshold,((ushort*)(oDesc.data+oDesc.step.p[0]*k)));
        }
    }
}
Example #5

unsigned long HeadDetectorNode::convertPclMessageToMat(const sensor_msgs::PointCloud2::ConstPtr& pointcloud, cv::Mat& depth_image, cv::Mat& color_image)
{
	pcl::PointCloud<pcl::PointXYZRGB> depth_cloud; // point cloud
	pcl::fromROSMsg(*pointcloud, depth_cloud);
	depth_image.create(depth_cloud.height, depth_cloud.width, CV_32FC3);
	color_image.create(depth_cloud.height, depth_cloud.width, CV_8UC3);
	uchar* depth_image_ptr = (uchar*) depth_image.data;
	uchar* color_image_ptr = (uchar*) color_image.data;
	for (int v=0; v<(int)depth_cloud.height; v++)
	{
		int depth_base_index = depth_image.step*v;
		int color_base_index = color_image.step*v;
		for (int u=0; u<(int)depth_cloud.width; u++)
		{
			int depth_index = depth_base_index + 3*u*sizeof(float);
			float* depth_data_ptr = (float*)(depth_image_ptr+depth_index);
			int color_index = color_base_index + 3*u*sizeof(uchar);
			uchar* color_data_ptr = (uchar*)(color_image_ptr+color_index);
			pcl::PointXYZRGB point_xyz = depth_cloud(u,v);
			depth_data_ptr[0] = point_xyz.x;
			depth_data_ptr[1] = point_xyz.y;
			depth_data_ptr[2] = (isnan(point_xyz.z)) ? 0.f : point_xyz.z;
			color_data_ptr[0] = point_xyz.r;
			color_data_ptr[1] = point_xyz.g;
			color_data_ptr[2] = point_xyz.b;
			//if (u%100 == 0) std::cout << "u" << u << " v" << v << " z" << data_ptr[2] << "\n";
		}
	}
	return ipa_Utils::RET_OK;
}
Example #6
// create a map for cv::remap()
void CreateMap(const cv::Mat& TriangleMap, const std::vector<cv::Mat>& HomMatrices, cv::Mat& map_x, cv::Mat& map_y)
{
	assert(TriangleMap.type() == CV_32SC1);

	// Allocate cv::Mat for the map
	map_x.create(TriangleMap.size(), CV_32FC1);
	map_y.create(TriangleMap.size(), CV_32FC1);

	// Compute inverse matrices
	std::vector<cv::Mat> invHomMatrices(HomMatrices.size());
	for(size_t i=0; i<HomMatrices.size(); i++){
		invHomMatrices[i] = HomMatrices[i].inv();
	}

	for(int y=0; y<TriangleMap.rows; y++){
		for(int x=0; x<TriangleMap.cols; x++){
			int idx = TriangleMap.at<int>(y,x)-1;
			if(idx >= 0){
				const cv::Mat& H = invHomMatrices[idx];
				float z = H.at<float>(2,0) * x + H.at<float>(2,1) * y + H.at<float>(2,2);
				if(z == 0.0f)
					z = 0.00001f;
				map_x.at<float>(y,x) = (H.at<float>(0,0) * x + H.at<float>(0,1) * y + H.at<float>(0,2)) / z;
				map_y.at<float>(y,x) = (H.at<float>(1,0) * x + H.at<float>(1,1) * y + H.at<float>(1,2)) / z;
			}
			else{
				map_x.at<float>(y,x) = x;
				map_y.at<float>(y,x) = y;
			}
		}
	}
}
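For context, the two maps plug directly into cv::remap. A usage sketch (warpWithMaps is a hypothetical wrapper, not part of the original project):

#include <opencv2/imgproc/imgproc.hpp>

// Each destination pixel (x, y) samples the source at (map_x(y,x), map_y(y,x)).
void warpWithMaps(const cv::Mat& src, const cv::Mat& TriangleMap,
                  const std::vector<cv::Mat>& HomMatrices, cv::Mat& warped)
{
	cv::Mat map_x, map_y;
	CreateMap(TriangleMap, HomMatrices, map_x, map_y);
	cv::remap(src, warped, map_x, map_y, cv::INTER_LINEAR);
}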
Example #7
//Attention: this method should be called inside pthread_mutex_lock(m_nextFrameMutex) only
void CvCapture_Android::prepareCacheForYUV(int width, int height)
{
    if (width != m_width || height != m_height)
    {
        LOGD("CvCapture_Android::prepareCacheForYUV: Changing size of buffers: from width=%d height=%d to width=%d height=%d", m_width, m_height, width, height);
        m_width = width;
        m_height = height;
        /*
        unsigned char *tmp = m_frameYUV420next;
        m_frameYUV420next = new unsigned char [width * height * 3 / 2];
        if (tmp != NULL)
        {
            delete[] tmp;
        }

        tmp = m_frameYUV420;
        m_frameYUV420 = new unsigned char [width * height * 3 / 2];
        if (tmp != NULL)
        {
            delete[] tmp;
        }*/
        m_frameYUV420.create(height * 3 / 2, width, CV_8UC1);
        m_frameYUV420next.create(height * 3 / 2, width, CV_8UC1);
    }
}
Example #8
File: denseSift.cpp Project: cmkyec/ScSPM
static void gen_dgauss(double sigma, cv::Mat& GX, cv::Mat& GY)
{
	int f_wid = 4 * cvCeil(sigma) + 1;
	cv::Mat kernel_separate = cv::getGaussianKernel(f_wid, sigma, CV_64F);
	cv::Mat kernel = kernel_separate * kernel_separate.t();
	GX.create(kernel.size(), kernel.type());
	GY.create(kernel.size(), kernel.type());
	for (int r = 0; r < kernel.rows; ++r) {
		for (int c = 0; c < kernel.cols; ++c) {
			if (c == 0) {
				GX.at<double>(r, c) = kernel.at<double>(r, c + 1) - kernel.at<double>(r, c);
			}
			else if (c == kernel.cols - 1) {
				GX.at<double>(r, c) = kernel.at<double>(r, c) - kernel.at<double>(r, c - 1);
			}
			else {
				GX.at<double>(r, c) = (kernel.at<double>(r, c + 1) -
					kernel.at<double>(r, c - 1)) / 2;
			}
			if (r == 0) {
				GY.at<double>(r, c) = kernel.at<double>(r + 1, c) - kernel.at<double>(r, c);
			}
			else if (r == kernel.rows - 1) {
				GY.at<double>(r, c) = kernel.at<double>(r, c) - kernel.at<double>(r - 1, c);
			}
			else {
				GY.at<double>(r, c) = (kernel.at<double>(r + 1, c) -
					kernel.at<double>(r - 1, c)) / 2;
			}
		}
	}
	GX = GX * 2 / cv::sum(cv::abs(GX))[0];
	GY = GY * 2 / cv::sum(cv::abs(GY))[0];
}
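As a usage sketch (gaussianGradients is a hypothetical driver, not from the ScSPM source), the derivative-of-Gaussian kernels above are meant to be convolved with the image to get smoothed gradients:

#include <opencv2/imgproc/imgproc.hpp>

// Convolve a grayscale image with the gen_dgauss() kernels to obtain x/y gradients.
void gaussianGradients(const cv::Mat& gray, double sigma,
                       cv::Mat& gradX, cv::Mat& gradY)
{
	cv::Mat GX, GY;
	gen_dgauss(sigma, GX, GY);
	cv::filter2D(gray, gradX, CV_64F, GX); // response to the x-derivative kernel
	cv::filter2D(gray, gradY, CV_64F, GY); // response to the y-derivative kernel
}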
Example #9
File: min.part.cpp Project: caomw/XTAM
void Cost::minv(float* _data, cv::Mat& _minIndex, cv::Mat& _minValue){
    // No precondition on the caller's mats is needed: create() below
    // (re)allocates _minIndex and _minValue with the proper types.
    int r = rows;
    int c = cols;
    int l = layers;

    _minIndex.create(rows,cols,CV_32SC1);
    _minValue.create(rows,cols,CV_32FC1);

    float* data = (float*)( _data);
    int* minIndex = (int*)(_minIndex.data);
    float* minValue = (float*)(_minValue.data);
    
    for(int i = 0, id = 0; i < r*c; i++){//i is offset in 2d, id is offset in 3d
        //first element is min so far
        int mi = 0;
        float mv = data[id];
        id++;
        for (int il = 1; il < l; il++, id++){ //il is layer index
            float v = data[id];
            if(mv > v){
                mi = il;
                mv = v;
            }
        }
        minIndex[i] = mi; 
        minValue[i] = mv; 
    }
}
Example #10
static void globalMattingHelper(cv::Mat _image, cv::Mat _trimap, cv::Mat &_foreground, cv::Mat &_alpha, cv::Mat &_conf)
{
    const cv::Mat_<cv::Vec3b> &image = (const cv::Mat_<cv::Vec3b>&)_image;
    const cv::Mat_<uchar> &trimap = (const cv::Mat_<uchar>&)_trimap;

    std::vector<cv::Point> foregroundBoundary = findBoundaryPixels(trimap, 255, 128);
    std::vector<cv::Point> backgroundBoundary = findBoundaryPixels(trimap, 0, 128);

    int n = (int)(foregroundBoundary.size() + backgroundBoundary.size());
    for (int i = 0; i < n; ++i)
    {
        int x = rand() % trimap.cols;
        int y = rand() % trimap.rows;

        if (trimap(y, x) == 0)
            backgroundBoundary.push_back(cv::Point(x, y));
        else if (trimap(y, x) == 255)
            foregroundBoundary.push_back(cv::Point(x, y));
    }

    std::sort(foregroundBoundary.begin(), foregroundBoundary.end(), IntensityComp(image));
    std::sort(backgroundBoundary.begin(), backgroundBoundary.end(), IntensityComp(image));

    std::vector<std::vector<Sample> > samples;
    calculateAlphaPatchMatch(image, trimap, foregroundBoundary, backgroundBoundary, samples);

    _foreground.create(image.size(), CV_8UC3);
    _alpha.create(image.size(), CV_8UC1);
    _conf.create(image.size(), CV_8UC1);

    cv::Mat_<cv::Vec3b> &foreground = (cv::Mat_<cv::Vec3b>&)_foreground;
    cv::Mat_<uchar> &alpha = (cv::Mat_<uchar>&)_alpha;
    cv::Mat_<uchar> &conf = (cv::Mat_<uchar>&)_conf;

    for (int y = 0; y < alpha.rows; ++y)
        for (int x = 0; x < alpha.cols; ++x)
        {
            switch (trimap(y, x))
            {
                case 0:
                    alpha(y, x) = 0;
                    conf(y, x) = 255;
                    foreground(y, x) = 0;
                    break;
                case 128:
                {
                    alpha(y, x) = 255 * samples[y][x].alpha;
                    conf(y, x) = 255 * exp(-samples[y][x].cost / 6);
                    cv::Point p = foregroundBoundary[samples[y][x].fi];
                    foreground(y, x) = image(p.y, p.x);
                    break;
                }
                case 255:
                    alpha(y, x) = 255;
                    conf(y, x) = 255;
                    foreground(y, x) = image(y, x);
                    break;
            }
        }
}
Example #11
File: LBSP.cpp Project: caomw/litiv
void LBSP::reshapeDesc(cv::Size oSize, const std::vector<cv::KeyPoint>& voKeypoints, const cv::Mat& oDescriptors, cv::Mat& oOutput) {
    static_assert(LBSP::DESC_SIZE==2,"bad assumptions in impl below");
    CV_DbgAssert(!voKeypoints.empty());
    CV_DbgAssert(!oDescriptors.empty() && oDescriptors.cols==1);
    CV_DbgAssert(oSize.width>0 && oSize.height>0);
    CV_DbgAssert(oDescriptors.type()==CV_16UC1 || oDescriptors.type()==CV_16UC3);
    const size_t nChannels = (size_t)oDescriptors.channels();
    const size_t nKeyPoints = voKeypoints.size();
    if(nChannels==1) {
        oOutput.create(oSize,CV_16UC1);
        oOutput = cv::Scalar_<ushort>(0);
        for(size_t k=0; k<nKeyPoints; ++k)
            oOutput.at<ushort>(voKeypoints[k].pt) = oDescriptors.at<ushort>((int)k);
    }
    else { //nChannels==3
        oOutput.create(oSize,CV_16UC3);
        oOutput = cv::Scalar_<ushort>(0,0,0);
        for(size_t k=0; k<nKeyPoints; ++k) {
            ushort* output_ptr = (ushort*)(oOutput.data + oOutput.step.p[0]*(int)voKeypoints[k].pt.y);
            const ushort* const desc_ptr = (ushort*)(oDescriptors.data + oDescriptors.step.p[0]*k);
            const size_t idx = 3*(int)voKeypoints[k].pt.x;
            for(size_t n=0; n<3; ++n)
                output_ptr[idx+n] = desc_ptr[n];
        }
    }
}
Example #12
File: LBSP.cpp Project: dcardenasnl/Test
static inline void lbsp_computeImpl(    const cv::Mat& oInputImg,
                                        const cv::Mat& oRefImg,
                                        const std::vector<cv::KeyPoint>& voKeyPoints,
                                        cv::Mat& oDesc,
                                        size_t _t) {
    CV_DbgAssert(oRefImg.empty() || (oRefImg.size==oInputImg.size && oRefImg.type()==oInputImg.type()));
    CV_DbgAssert(oInputImg.type()==CV_8UC1 || oInputImg.type()==CV_8UC3);
    CV_DbgAssert(LBSP::DESC_SIZE==2); // @@@ also relies on a constant desc size
    const size_t nChannels = (size_t)oInputImg.channels();
    const size_t _step_row = oInputImg.step.p[0];
    const uchar* _data = oInputImg.data;
    const uchar* _refdata = oRefImg.empty()?oInputImg.data:oRefImg.data;
    const size_t nKeyPoints = voKeyPoints.size();
    if(nChannels==1) {
        oDesc.create((int)nKeyPoints,1,CV_16UC1);
        for(size_t k=0; k<nKeyPoints; ++k) {
            const int _x = (int)voKeyPoints[k].pt.x;
            const int _y = (int)voKeyPoints[k].pt.y;
            const uchar _ref = _refdata[_step_row*(_y)+_x];
            ushort& _res = oDesc.at<ushort>((int)k);
            #include "LBSP_16bits_dbcross_1ch.i"
        }
    }
    else { //nChannels==3
        oDesc.create((int)nKeyPoints,1,CV_16UC3);
        for(size_t k=0; k<nKeyPoints; ++k) {
            const int _x = (int)voKeyPoints[k].pt.x;
            const int _y = (int)voKeyPoints[k].pt.y;
            const uchar* _ref = _refdata+_step_row*(_y)+3*(_x);
            ushort* _res = ((ushort*)(oDesc.data + oDesc.step.p[0]*k));
            #include "LBSP_16bits_dbcross_3ch1t.i"
        }
    }
}
Example #13
File: test2.cpp Project: lvst09/ringmaster
/** @function main */
int test2main()
{
    /// Load the image, bailing out before any processing if it failed
    src = imread("/Users/sky/Downloads/daily_delete/aaa/Movie12347.MOV_MYIMG_ORI0.JPG");
    if( !src.data )
    { return -1; }

    myhist(&src);
//    myback();
    /// Wait for the user
//    waitKey(0);
    
//    return 0;
    
    cv::Mat threshold_output;
    
    /// Create a matrix (dst) of the same type and size as src
    dst.create( src.size(), src.type() );
    
    
    cvtColor( src, hsv, CV_BGR2HSV );
    
    /// Isolate the Hue channel
    hue.create( hsv.size(), hsv.depth() );
    int ch[] = { 0, 0 };
    mixChannels( &hsv, 1, &hue, 1, ch, 1 );
    
    /// Convert the source image to grayscale
//    cvtColor( hsv, src_gray, CV_BGR2GRAY );
//    for (int i = 0; i < 255; ++i) {
//        threshold(hue, threshold_output, i, 255, THRESH_BINARY );
//        int aa = i;
//        stringstream ss;
//        ss<<aa;
//        string s1 = ss.str();
//        cout<<s1<<endl; // 30
//        
//        string s2 = "/Users/sky/Downloads/daily_delete/Gray_Image" + s1 + ".jpg";
//        ss>>s2;
//        cout<<s2<<endl; // 30
//        imwrite("/Users/sky/Downloads/daily_delete/Gray_Image" + s1 + ".jpg", threshold_output);
//    }
//    threshold(hue, threshold_output, 0, 255, THRESH_OTSU );
//    imshow("calcHist Demo", threshold_output );
    
    /// Create the display window
    namedWindow( window_name, CV_WINDOW_AUTOSIZE );
    
//    imshow("abc", threshold_output);
    /// Create the trackbar
    createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );
    
    /// Show the image
    CannyThreshold(0, 0);
    
    /// Wait for the user
    waitKey(0);
    
    return 0;
}
Example #14
        opt_person_recognition()
        {
            Scale[0] = 0.5; Scale[1] = 0.75; Scale[2] = 1;

            Sigma.create(1, 1, CV_64FC1);
            Sigma.at<double>(0, 0) = 0.6;

            Sigma_edge.create(1, 1, CV_64FC1);
            Sigma_edge.at<double>(0, 0) = 1;
        }
Example #15
void Conversions::fromMsg(const std::vector<graph_slam_msgs::SensorData> &sensor_data, cv::Mat &features)
{
    // Count feature data points. Assumption: all have the same feature type.
    int feature_type = 0;
    int feature_count = 0;
    int descriptor_size = 0;
    for (const auto &data : sensor_data) {
        if (data.sensor_type == graph_slam_msgs::SensorData::SENSOR_TYPE_FEATURE) {
            feature_type = data.features.descriptor_type;
            if (data.features.features.size() > 0) {
                descriptor_size = data.features.features[0].descriptor.size();
                feature_count += data.features.features.size();
            }
        }
    }

    if (feature_count > 0) {
        int k = 0;
        switch (feature_type) {
        case graph_slam_msgs::Features::BRIEF:
        case graph_slam_msgs::Features::ORB:
        case graph_slam_msgs::Features::BRISK:
        case graph_slam_msgs::Features::FREAK:
            features.create(feature_count, descriptor_size, CV_8U);
            for (const auto &data : sensor_data) {
                if (data.sensor_type == graph_slam_msgs::SensorData::SENSOR_TYPE_FEATURE) {
                    for (unsigned int i = 0; i < data.features.features.size(); i++) {
                        for (int j = 0; j < descriptor_size; j++) {
                            float val = data.features.features[i].descriptor[j];
                            features.at<unsigned char>(k,j) = (unsigned char)val;
                        }
                        k++;
                    }
                }
            }
            break;
        case graph_slam_msgs::Features::SURF:
        case graph_slam_msgs::Features::SIFT:
            features.create(feature_count, descriptor_size, CV_32F);
            for (const auto &data : sensor_data) {
                if (data.sensor_type == graph_slam_msgs::SensorData::SENSOR_TYPE_FEATURE) {
                    for (unsigned int i = 0; i < data.features.features.size(); i++) {
                        for (int j = 0; j < descriptor_size; j++) {
                            features.at<float>(k,j) = data.features.features[i].descriptor[j];
                        }
                        k++;
                    }
                }
            }
            break;
        default:
            ROS_ERROR_NAMED("feature_link_estimation", "unknown feature type: %d (LE)", feature_type);
            break;
        }
    }
}
Example #16

/**
 Reads a frame.

 @return false if no frame can be read or the read frame is not valid, true otherwise.
*/
bool Kinect::readFrame(cv::Mat &frame, CameraMode camMode)
{
	VideoFrameRef irf;
	int hIr, wIr;

	VideoFrameRef colorf; 
	int hColor, wColor;


	switch (camMode)
	{
		case (NI_SENSOR_DEPTH):

			rc = depth.readFrame(&irf);
			if (irf.isValid())
			{
				const uint16_t* imgBufIr = (const uint16_t*)irf.getData();
				hIr = irf.getHeight();
        		wIr = irf.getWidth();
        		frame.create(hIr, wIr, CV_16U);
        		memcpy(frame.data, imgBufIr, hIr * wIr * sizeof(uint16_t));
        		frame.convertTo(frame, CV_8U); // saturating cast; no 16-to-8-bit scaling is applied
				return true;
			}
			else
			{
				cout << "ERROR: Frame not valid." << endl;
				return false;
			}

		case (NI_SENSOR_COLOR):

			rc = color.readFrame(&colorf);
			if(colorf.isValid())
			{
				const openni::RGB888Pixel* imgBufColor = (const openni::RGB888Pixel*)colorf.getData();
				hColor = colorf.getHeight();
        		wColor = colorf.getWidth();
        		frame.create(hColor, wColor, CV_8UC3);
        		memcpy(frame.data, imgBufColor,  3 * hColor * wColor * sizeof(uint8_t));
        		cvtColor(frame, frame, CV_BGR2RGB);
        		return true;
			}
			else
			{
				cout << "ERROR: Frame not valid." << endl;
				return false;
			}

		default:
			cout << "ERROR: No frame to be read. Object not initialize." << endl;
			return false;
	}
}
Example #17
File: PDM.cpp Project: digitaltembo/jade
//===========================================================================
void Euler2Rot(cv::Mat &R,const double pitch,const double yaw,const double roll,
	       bool full = true)
{
  if(full){if((R.rows != 3) || (R.cols != 3))R.create(3,3,CV_64F);}
  else{if((R.rows != 2) || (R.cols != 3))R.create(2,3,CV_64F);}
  double sina = sin(pitch), sinb = sin(yaw), sinc = sin(roll);
  double cosa = cos(pitch), cosb = cos(yaw), cosc = cos(roll);
  // R.db(r,c) is presumably a project shorthand macro for R.at<double>(r,c).
  R.db(0,0) = cosb*cosc; R.db(0,1) = -cosb*sinc; R.db(0,2) = sinb;
  R.db(1,0) = cosa*sinc + sina*sinb*cosc;
  R.db(1,1) = cosa*cosc - sina*sinb*sinc;
  R.db(1,2) = -sina*cosb;
  if(full) AddOrthRow(R); // complete the third row so R is a full rotation
  return;
}
Example #18
		KinectPub(): synchronizer(SyncPolicy(10)), encode_format(".png")
		{
			/// Configure variables.
			mat_image.create(240, 320, CV_8UC1);
			mat_depth.create(240, 320, CV_16UC1);
			encode_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
			encode_params.push_back(0);
			msg_pub.id = -1;
			msg_pub.image.reserve(76800);
			msg_pub.depth.reserve(153600);
			count_skip = 0;
		}
Example #19

	// One-time initialization from the first video frame: set the codebook
	// bounds and per-channel min/max thresholds, then allocate the buffers.
	void init(cv::Mat &I_from_video) {
		vector<int> v(3,10);
		set_global_vecs(cbBounds, v);
		v[0] = 6; v[1] = 20; v[2] = 8; //Just some decent defaults for low side
		set_global_vecs(minMod, v);
		v[0] = 70; v[1] = 20; v[2] = 6; //Decent defaults for high side
		set_global_vecs(maxMod, v);
		Iyuv.create(I_from_video.size(), I_from_video.type());
		mask.create(I_from_video.size(), CV_8UC1);
		row = I_from_video.rows;
		col = I_from_video.cols;
		image_length = row*col;
		cout << "(row,col,len) = (" << row << ", " << col << ", " << image_length << ")" << endl;
		codebooks.resize(image_length);
	}
Example #20
		CameraCalibration(int l_board_w, int l_board_h, int l_nImages) :
				_board_w(l_board_w), _board_h(l_board_h), _nImages(l_nImages),
				_board_n(l_board_w * l_board_h),
				_objectPointsSet(false), _initialisation(false),
				_imagePoints(std::vector<std::vector<cv::Point2f>>()),
				_objectPoints(std::vector<std::vector<cv::Point3f>>()),
				_intrinsicsMatrix(cv::Mat_<float>()), _distortionCoeffs(cv::Mat_<float>()),
				_rvecs(std::vector<cv::Mat>()), _tvecs(std::vector<cv::Mat>()) {

			_intrinsicsMatrix.create(3, 3, CV_32FC1);
			_intrinsicsMatrix.ptr<float>(0)[0] = 1.0f;
			_intrinsicsMatrix.ptr<float>(1)[1] = 1.0f;
			_distortionCoeffs.create(5, 1, CV_32FC1);

		}
Example #21
// How to do sharpening without explicitly using a convolution filter and cv::filter2D
void RFeatures::sharpen_OLD( const cv::Mat &img, cv::Mat &out)
{
    out.create( img.size(), img.type());    // Allocate if necessary

    int channels = img.channels();
    int nc = img.cols * channels;

    for ( int j = 1; j < img.rows-1; ++j) // All rows except first and last
    {
        const uchar* previous = img.ptr<const uchar>(j-1); // Previous row
        const uchar* current = img.ptr<const uchar>(j); // Current row
        const uchar* next = img.ptr<const uchar>(j+1);  // Next row
        uchar* output = out.ptr<uchar>(j);  // Output row

        for ( int i = channels; i < nc - channels; ++i)   // All columns except first and last
        {
            uchar v = 5*current[i] - current[i-channels] - current[i+channels] - previous[i] - next[i];
            *output++ = cv::saturate_cast<uchar>(v);
        }   // end for
    }   // end for

    // Set the unprocessed pixels to 0
    cv::Scalar s(0);
    if (img.channels() == 3)
        s = cv::Scalar(0,0,0);
    out.row(0).setTo( s);
    out.row(out.rows-1).setTo( s);
    out.col(0).setTo( s);
    out.col(out.cols-1).setTo( s);
}   // end sharpen_OLD
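For comparison, here is the kernel-and-cv::filter2D form that sharpen_OLD deliberately avoids; a sketch (sharpenFilter2D is hypothetical, not part of the original RFeatures code):

#include <opencv2/imgproc/imgproc.hpp>

// Same 4-neighbour Laplacian sharpening, written as an explicit convolution.
void sharpenFilter2D( const cv::Mat &img, cv::Mat &out)
{
    const cv::Mat kernel = (cv::Mat_<float>(3,3) <<
                             0, -1,  0,
                            -1,  5, -1,
                             0, -1,  0);
    cv::filter2D( img, out, img.depth(), kernel);
}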
Example #22
/**
 * @brief create a skeleton by iterative morphological opening
 * @param input  : input image (8-bit); note that it is consumed and
 *                 overwritten with the result
 * @param output : output skeleton image
 * @param kernel : structuring element for the morphology operations
 */
void morphology_skeleton(cv::Mat &input, cv::Mat &output, cv::Mat const &kernel)
{
    if(input.type() != CV_8U){
        throw std::runtime_error(COMMON_DEBUG_MESSAGE + "input.type() != CV_8U");
    }

    if(input.data == output.data){
        output = cv::Mat::zeros(input.size(), CV_8U);
    }else{
        output.create(input.size(), CV_8U);
        output.setTo(0);
    }
    cv::Mat temp;
    cv::Mat eroded;
    bool done;
    do
    {
        cv::erode(input, eroded, kernel);
        cv::dilate(eroded, temp, kernel); // temp = open(img)
        cv::subtract(input, temp, temp);
        cv::bitwise_or(output, temp, output);
        eroded.copyTo(input);

        done = (cv::countNonZero(input) == 0);
    } while (!done);

    input = output;
}
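A hypothetical call site; since morphology_skeleton consumes its input, pass a clone when the source image is still needed:

#include <opencv2/imgproc/imgproc.hpp>

cv::Mat skeletonize(const cv::Mat &binary)
{
    cv::Mat work = binary.clone(); // the function erodes its input in place
    cv::Mat skeleton;
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(3, 3));
    morphology_skeleton(work, skeleton, kernel);
    return skeleton;
}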
Example #23
cv::Mat RFeatures::swapEndBytes( const cv::Mat &img, cv::Mat &out)
{
    out.create( img.size(), img.type());

    int channels = img.channels();
    int nl = img.rows;
    int stepSize = img.cols * channels;
    const uchar* inScanline = NULL;
    uchar* outScanline = NULL;
    int modRes = 0;

    for ( int j = 0; j < nl; ++j)
    {
        inScanline = img.ptr<const uchar>(j);
        outScanline = out.ptr<uchar>(j);

        for ( int i = 0; i < stepSize; ++i)
        {
            modRes = i % channels;
            if ( modRes == 0)   // Put the right input byte into the left output byte
                outScanline[i] = inScanline[i-1 + channels];
            else if ( modRes == (channels - 1)) // Put the left input byte into the right output byte
                outScanline[i] = inScanline[i+1 - channels];
            else    // Not a left or right byte so just copy down normally
                outScanline[i] = inScanline[i];
        }   // end for
    }   // end for

    return out;
}  // end swapEndBytes
Example #24
void sharpen(const cv::Mat& image, cv::Mat& result)
{

    result.create(image.size(), image.type()); // allocate if necessary

    for (int j = 1; j < image.rows - 1; j++)
    { // for all rows (except first and last)

        const uchar* previous = image.ptr<const uchar>(j - 1); // previous row
        const uchar* current = image.ptr<const uchar>(j);      // current row
        const uchar* next = image.ptr<const uchar>(j + 1);     // next row

        uchar* output = result.ptr<uchar>(j); // output row

        for (int i = 1; i < image.cols - 1; i++)
        {
            // Index the output row directly; a pointer incremented from
            // column 0 while reading from column 1 would shift the whole
            // result one pixel to the left.
            output[i] = cv::saturate_cast<uchar>(5 * current[i] - current[i - 1] - current[i + 1] -
                                                 previous[i] - next[i]);
        }
    }

    // Set the unprocessed pixels to 0
    result.row(0).setTo(cv::Scalar(0));
    result.row(result.rows - 1).setTo(cv::Scalar(0));
    result.col(0).setTo(cv::Scalar(0));
    result.col(result.cols - 1).setTo(cv::Scalar(0));
}
Example #25
void NormalRadonTransform::Normalization(const cv::Mat& src, cv::Mat& dest)
{
	// Assumes an 8-bit single-channel src.
	int rows = src.rows;
	int cols = src.cols;
	dest.create(rows, cols, src.type());
	int max = 0;
	int min = 255;
	for (int i = 0; i < rows; i++)
	{
		const uchar* in = src.ptr<uchar>(i);
		for (int j = 0; j < cols; j++)
		{
			if (max < *in)
			{
				max = *in;
			}
			if (min > *in)
			{
				min = *in;
			}
			in++;
		}
	}
	// normalization: stretch [min, max] to [0, 255]
	int deviation = (max > min) ? (max - min) : 1; // guard against flat images
	for (int i = 0; i < rows; i++)
	{
		const uchar* in = src.ptr<uchar>(i);
		uchar* out = dest.ptr<uchar>(i);
		for (int j = 0; j < cols; j++)
		{
			*out++ = (uchar)((*in++ - min) * 255 / deviation);
		}
	}
}
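Incidentally, the min/max stretch this function implements by hand is also a one-liner in the library; a sketch of the cv::normalize equivalent (normalizeMinMax is hypothetical):

#include <opencv2/core/core.hpp>

// Stretch the value range of an 8-bit image to [0, 255].
void normalizeMinMax(const cv::Mat& src, cv::Mat& dest)
{
	cv::normalize(src, dest, 0, 255, cv::NORM_MINMAX, CV_8U);
}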
Example #26

void buildPanorama(const vector<sensor_msgs::ImageConstPtr>& images, const vector<sensor_msgs::CameraInfoConstPtr>& cameraInfos)
{
	double averageTimestampSecs = 0.0;
	static cv::Mat panorama;
	static cv::Mat rotatedImages[MAX_CAMERAS];

	std::string encoding = images[0]->encoding;
	if(encoding == sensor_msgs::image_encodings::YUV422) encoding = sensor_msgs::image_encodings::BGR8;
    //else encoding = sensor_msgs::image_encodings::MONO8; // debug hack for visualization

	// Rotate individual images
	for(size_t i = 0; i < images.size(); i++) {
		cv_bridge::CvImageConstPtr imagePtr = cv_bridge::toCvShare(images[i], encoding);
		rotateImage(imagePtr->image, g_inplaneRotations[i], rotatedImages[i]);
	}

	// Determine full panorama width from rotated camera images
	unsigned int panoramaHeight = 0, panoramaWidth = 0;
	for(size_t i = 0; i < images.size(); i++) {
		if(vertical) {
			panoramaHeight += rotatedImages[i].rows;
			panoramaWidth = max(panoramaWidth, (unsigned)rotatedImages[i].cols);
		}
		else {
			panoramaWidth += rotatedImages[i].cols;
			panoramaHeight = max(panoramaHeight, (unsigned)rotatedImages[i].rows);
		}
	}

	panorama.create(panoramaHeight, panoramaWidth, rotatedImages[0].type()); // only reallocates if size has changed

	// Merge rotated images into panorama
	int currentPos = 0;
	for(size_t i = 0; i < images.size(); i++)
	{
		if(vertical) {
			rotatedImages[i].copyTo(panorama(cv::Rect(0, currentPos, rotatedImages[i].cols, rotatedImages[i].rows)));
			currentPos += rotatedImages[i].rows;
		}
		else {
			rotatedImages[i].copyTo(panorama(cv::Rect(currentPos, 0, rotatedImages[i].cols, rotatedImages[i].rows)));
			currentPos += rotatedImages[i].cols;
		}
		averageTimestampSecs += images[i]->header.stamp.toSec(); 
	}

	averageTimestampSecs /= images.size();

	// Create output image message
	cv_bridge::CvImagePtr outputImagePtr(new cv_bridge::CvImage());
	
	outputImagePtr->image = panorama;
	outputImagePtr->header.seq = images[0]->header.seq;

	// Timestamp must be in the timeframe of the original images, in case we are operating on a bag file!
	outputImagePtr->header.stamp = ros::Time(averageTimestampSecs);
	outputImagePtr->encoding = encoding;

	g_panoramaPublisher.publish(outputImagePtr->toImageMsg());
}
Example #27
template <typename T>
static void blendLinearGold(const cv::Mat &img1, const cv::Mat &img2,
                            const cv::Mat &weights1, const cv::Mat &weights2,
                            cv::Mat &result_gold)
{
    result_gold.create(img1.size(), img1.type());

    int cn = img1.channels();

    for (int y = 0; y < img1.rows; ++y)
    {
        const float *weights1_row = weights1.ptr<float>(y);
        const float *weights2_row = weights2.ptr<float>(y);
        const T *img1_row = img1.ptr<T>(y);
        const T *img2_row = img2.ptr<T>(y);
        T *result_gold_row = result_gold.ptr<T>(y);

        for (int x = 0; x < img1.cols * cn; ++x)
        {
            // x walks interleaved channel values; the per-pixel weights are
            // shared across channels, hence the x / cn lookup.
            float w1 = weights1_row[x / cn];
            float w2 = weights2_row[x / cn];
            result_gold_row[x] = static_cast<T>((img1_row[x] * w1
                                                 + img2_row[x] * w2) / (w1 + w2 + 1e-5f));
        }
    }
}
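The function above reads like a test reference; assuming OpenCV >= 3.0, where the operation itself is exposed as cv::blendLinear, a comparison could look roughly like this (checkBlend is a hypothetical sketch):

#include <opencv2/imgproc.hpp>

void checkBlend(const cv::Mat &img1, const cv::Mat &img2,
                const cv::Mat &w1, const cv::Mat &w2)
{
    cv::Mat expected, actual;
    blendLinearGold<uchar>(img1, img2, w1, w2, expected);
    cv::blendLinear(img1, img2, w1, w2, actual);
    // Allow one gray level of rounding slack between the two implementations.
    CV_Assert(cv::norm(expected, actual, cv::NORM_INF) <= 1.0);
}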
Example #28
void sharpen2(const cv::Mat& image, cv::Mat& result)
{

    result.create(image.size(), image.type()); // allocate if necessary

    int step = image.step1(); // elements per row (assumes a continuous single-channel image)
    const uchar* previous = image.data;        // ptr to previous row
    const uchar* current = image.data + step;  // ptr to current row
    const uchar* next = image.data + 2 * step; // ptr to next row
    uchar* output = result.data + step;        // ptr to output row

    for (int j = 1; j < image.rows - 1; j++)
    { // for each row (except first and last)
        for (int i = 1; i < image.cols - 1; i++)
        { // for each column (except first and last)

            output[i] = cv::saturate_cast<uchar>(5 * current[i] - current[i - 1] - current[i + 1] -
                                                 previous[i] - next[i]);
        }

        previous += step;
        current += step;
        next += step;
        output += step;
    }

    // Set the unprocessed pixels to 0
    result.row(0).setTo(cv::Scalar(0));
    result.row(result.rows - 1).setTo(cv::Scalar(0));
    result.col(0).setTo(cv::Scalar(0));
    result.col(result.cols - 1).setTo(cv::Scalar(0));
}
Example #29

bool LibvotSift2OpencvKeyPoints(SiftData &sift_data,
                                std::vector<cv::KeyPoint> &key_points,
                                cv::Mat &descriptors)
{
	int num_features = sift_data.getFeatureNum();
	key_points.resize(num_features);
	DTYPE *&dp = sift_data.getDesPointer();
	LTYPE *&lp = sift_data.getLocPointer();
	int des_dim = sift_data.getDesDim();
	int loc_dim = sift_data.getLocDim();

	descriptors.create(num_features, des_dim, CV_32FC1);
	for (int i = 0; i < num_features; i++) {
		cv::KeyPoint &kp = key_points[i];
		kp.pt.x = lp[i*loc_dim + 0];
		kp.pt.y = lp[i*loc_dim + 1];
		//color = lp[i*loc_dim + 2];
		kp.size = lp[i*loc_dim + 3];
		kp.angle = lp[i*loc_dim + 4];

		// load descriptors
		for (int j = 0; j < des_dim; j++) {
			descriptors.at<float>(i,j) = static_cast<float>(dp[i*des_dim + j]);
		}
	}

	return true;
}
Example #30
// Unused. Implemented as a sanity check.
// This is what the OpenCV documentation says it is doing, but I could only
// achieve identical behaviour using the WARP_INVERSE_MAP flag.
void warpAffine(const cv::Mat& src,
                cv::Mat& dst,
                const cv::Mat& M,
                const cv::Size& size) {
  if (src.type() != cv::DataType<double>::type) {
    throw std::runtime_error("expected 64-bit floating point number");
  }

  dst.create(size.height, size.width, src.type());
  dst = cv::Scalar::all(0);

  for (int x = 0; x < size.width; x += 1) {
    for (int y = 0; y < size.height; y += 1) {
      cv::Mat p = (cv::Mat_<double>(3, 1) << x, y, 1);
      cv::Mat q = M * p;
      int u = std::floor(q.at<double>(0, 0) + 0.5);
      int v = std::floor(q.at<double>(1, 0) + 0.5);

      if (u >= 0 && u < src.cols && v >= 0 && v < src.rows) {
        // Non-inverted mapping!
        dst.at<double>(y, x) = src.at<double>(v, u);
      }
    }
  }
}
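A sketch of the sanity check the comment describes, assuming M is a 2x3 CV_64F affine matrix (compareWarp is hypothetical): the hand-rolled forward lookup should agree with cv::warpAffine only when the flag says M maps destination to source.

#include <iostream>
#include <opencv2/imgproc/imgproc.hpp>

void compareWarp(const cv::Mat& src, const cv::Mat& M, const cv::Size& size) {
  cv::Mat ours, theirs;
  ::warpAffine(src, ours, M, size); // the function above (global namespace)
  cv::warpAffine(src, theirs, M, size,
                 cv::INTER_NEAREST | cv::WARP_INVERSE_MAP);
  std::cout << "max diff: " << cv::norm(ours, theirs, cv::NORM_INF) << std::endl;
}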