Example #1
UMat& UMat::setTo(InputArray _value, InputArray _mask)
{
    bool haveMask = !_mask.empty();
#ifdef HAVE_OPENCL
    int tp = type(), cn = CV_MAT_CN(tp);

    if( dims <= 2 && cn <= 4 && CV_MAT_DEPTH(tp) < CV_64F && ocl::useOpenCL() )
    {
        Mat value = _value.getMat();
        CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::UMAT) );
        double buf[4] = { 0, 0, 0, 0 };
        convertAndUnrollScalar(value, tp, (uchar *)buf, 1);

        int scalarcn = cn == 3 ? 4 : cn, rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
        String opts = format("-D dstT=%s -D rowsPerWI=%d -D dstST=%s -D dstT1=%s -D cn=%d",
                             ocl::memopTypeToStr(tp), rowsPerWI,
                             ocl::memopTypeToStr(CV_MAKETYPE(tp, scalarcn)),
                             ocl::memopTypeToStr(CV_MAT_DEPTH(tp)), cn);

        ocl::Kernel setK(haveMask ? "setMask" : "set", ocl::core::copyset_oclsrc, opts);
        if( !setK.empty() )
        {
            ocl::KernelArg scalararg(0, 0, 0, 0, buf, CV_ELEM_SIZE1(tp) * scalarcn);
            UMat mask;

            if( haveMask )
            {
                mask = _mask.getUMat();
                CV_Assert( mask.size() == size() && mask.type() == CV_8UC1 );
                ocl::KernelArg maskarg = ocl::KernelArg::ReadOnlyNoSize(mask),
                        dstarg = ocl::KernelArg::ReadWrite(*this);
                setK.args(maskarg, dstarg, scalararg);
            }
            else
            {
                ocl::KernelArg dstarg = ocl::KernelArg::WriteOnly(*this);
                setK.args(dstarg, scalararg);
            }

            size_t globalsize[] = { (size_t)cols, ((size_t)rows + rowsPerWI - 1) / rowsPerWI };
            if( setK.run(2, globalsize, NULL, false) )
                return *this;
        }
    }
#endif
    Mat m = getMat(haveMask ? ACCESS_RW : ACCESS_WRITE);
    m.setTo(_value, _mask);
    return *this;
}
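A minimal usage sketch (not part of the original file) that exercises both dispatch paths above: the unmasked call compiles the "set" kernel, the masked call the "setMask" kernel, and either one falls back to the CPU Mat::setTo at the bottom of the function when no OpenCL device is usable.

#include <opencv2/core.hpp>
using namespace cv;

int main()
{
    UMat img(480, 640, CV_8UC3);
    img.setTo(Scalar(255, 0, 0));                  // no mask -> "set" kernel

    UMat mask(480, 640, CV_8UC1, Scalar(0));
    mask(Rect(0, 0, 320, 240)).setTo(Scalar(255));
    img.setTo(Scalar(0, 255, 0), mask);            // mask -> "setMask" kernel
    return 0;
}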
Example #2
// Converts a ROS Image to a cv::Mat by sharing the data or changing its endianness if needed
cv::Mat matFromImage(const sensor_msgs::Image& source)
{
  int source_type = getCvType(source.encoding);
  int byte_depth = enc::bitDepth(source.encoding) / 8;
  int num_channels = enc::numChannels(source.encoding);

  if (source.step < source.width * byte_depth * num_channels)
  {
    std::stringstream ss;
    ss << "Image is wrongly formed: step < width * byte_depth * num_channels  or  " << source.step << " != " <<
        source.width << " * " << byte_depth << " * " << num_channels;
    throw Exception(ss.str());
  }

  if (source.height * source.step != source.data.size())
  {
    std::stringstream ss;
    ss << "Image is wrongly formed: height * step != size  or  " << source.height << " * " <<
              source.step << " != " << source.data.size();
    throw Exception(ss.str());
  }

  // If the endianness is the same as locally, share the data
  cv::Mat mat(source.height, source.width, source_type, const_cast<uchar*>(&source.data[0]), source.step);
  if ((boost::endian::order::native == boost::endian::order::big && source.is_bigendian) ||
      (boost::endian::order::native == boost::endian::order::little && !source.is_bigendian) ||
      byte_depth == 1)
    return mat;

  // Otherwise, reinterpret the data as bytes and switch the channels accordingly
  mat = cv::Mat(source.height, source.width, CV_MAKETYPE(CV_8U, num_channels*byte_depth),
                const_cast<uchar*>(&source.data[0]), source.step);
  cv::Mat mat_swap(source.height, source.width, mat.type());

  std::vector<int> fromTo;
  fromTo.reserve(num_channels*byte_depth);
  for(int i = 0; i < num_channels; ++i)
    for(int j = 0; j < byte_depth; ++j)
    {
      fromTo.push_back(byte_depth*i + j);
      fromTo.push_back(byte_depth*i + byte_depth - 1 - j);
    }
  cv::mixChannels(std::vector<cv::Mat>(1, mat), std::vector<cv::Mat>(1, mat_swap), fromTo);

  // Interpret mat_swap back as the proper type
  mat_swap = cv::Mat(source.height, source.width, source_type, mat_swap.data, mat_swap.step);

  return mat_swap;
}
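The mixChannels byte-swap trick above generalizes; here is a stripped-down sketch for a single CV_16UC1 image (swapBytes16 is a hypothetical helper, not part of cv_bridge): view each 16-bit element as two 8-bit channels and exchange them.

#include <opencv2/core.hpp>

// hypothetical helper, assuming a CV_16UC1 input
cv::Mat swapBytes16(const cv::Mat& src)
{
    CV_Assert(src.type() == CV_16UC1);
    cv::Mat asBytes(src.rows, src.cols, CV_8UC2, src.data, src.step); // shared view
    cv::Mat swapped(src.rows, src.cols, CV_8UC2);
    const int fromTo[] = { 0,1, 1,0 };   // low byte <-> high byte
    cv::mixChannels(&asBytes, 1, &swapped, 1, fromTo, 2);
    return cv::Mat(src.rows, src.cols, CV_16UC1, swapped.data, swapped.step).clone();
}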
Example #3
static bool ocl_pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), channels = CV_MAT_CN(type);

    if (channels > 4 || borderType != BORDER_DEFAULT)
        return false;

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    if (depth == CV_64F && !doubleSupport)
        return false;

    Size ssize = _src.size();
    if ((_dsz.area() != 0) && (_dsz != Size(ssize.width * 2, ssize.height * 2)))
        return false;

    UMat src = _src.getUMat();
    Size dsize = Size(ssize.width * 2, ssize.height * 2);
    _dst.create( dsize, src.type() );
    UMat dst = _dst.getUMat();

    int float_depth = depth == CV_64F ? CV_64F : CV_32F;
    const int local_size = 16;
    char cvt[2][50];
    String buildOptions = format(
            "-D T=%s -D FT=%s -D convertToT=%s -D convertToFT=%s%s "
            "-D T1=%s -D cn=%d -D LOCAL_SIZE=%d",
            ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, channels)),
            ocl::convertTypeStr(float_depth, depth, channels, cvt[0]),
            ocl::convertTypeStr(depth, float_depth, channels, cvt[1]),
            doubleSupport ? " -D DOUBLE_SUPPORT" : "",
            ocl::typeToStr(depth), channels, local_size
    );
    size_t globalThreads[2] = { (size_t)dst.cols, (size_t)dst.rows };
    size_t localThreads[2] = { local_size, local_size };
    ocl::Kernel k;
    if (ocl::Device::getDefault().isIntel() && channels == 1)
    {
        k.create("pyrUp_unrolled", ocl::imgproc::pyr_up_oclsrc, buildOptions);
        globalThreads[0] = dst.cols/2; globalThreads[1] = dst.rows/2;
    }
    else
        k.create("pyrUp", ocl::imgproc::pyr_up_oclsrc, buildOptions);

    if (k.empty())
        return false;

    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst));
    return k.run(2, globalThreads, localThreads, false);
}
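This static helper is not called directly; cv::pyrUp dispatches to it (via the CV_OCL_RUN machinery) when the arguments are UMats and OpenCL is usable, so a typical trigger is simply:

cv::UMat src(240, 320, CV_8UC1), dst;
cv::pyrUp(src, dst);   // dst becomes 480x640; runs on the OpenCL device if available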
Example #4
/*
 * call-seq:
 *   query -> IplImage or nil
 *
 * Grabs and returns a frame from a camera or file. Just a combination of grab and retrieve in one call.
 */
VALUE
rb_query(VALUE self)
{
  IplImage *frame = cvQueryFrame(CVCAPTURE(self));
  if(!frame)
    return Qnil;
  VALUE image = cIplImage::new_object(cvSize(frame->width, frame->height), CV_MAKETYPE(CV_8U, frame->nChannels));
  if (frame->origin == IPL_ORIGIN_TL) {
    cvCopy(frame, CVARR(image));
  }
  else {
    cvFlip(frame, CVARR(image));
  }
  return image;
}
Example #5
bool KGDAL2CV::CheckDataType(const GDALDataType& gdalDataType, cv::Mat img)
{
	int TypeMap_GDAL2_0[GDT_TypeCount] = { CV_USRTYPE1, CV_8U, CV_16U, CV_16S, CV_32S, CV_32S, CV_32F, CV_64F, CV_USRTYPE1, CV_USRTYPE1, CV_USRTYPE1, CV_USRTYPE1};
	int imgType = img.type();
	
	if (imgType == CV_MAKETYPE(TypeMap_GDAL2_0[gdalDataType], img.channels()))
	{
		if (gdalDataType == GDT_UInt32) std::cout << "cv::Mat doesn't support datatype: CV_32U!" << std::endl;
		if (TypeMap_GDAL2_0[gdalDataType] == CV_USRTYPE1) std::cout << "user-defined datatype may not match!" << std::endl;
		return true;
	} 
	std::cout << "Warning: use the different Data Type between cv::Mat and GDAL!\r\n" 
				 "\tproper range cast may be used!" << std::endl;
	return false;
}
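A hedged usage sketch (the KGDAL2CV class comes from the surrounding project and the GDT_* codes from GDAL; the call below is illustrative only):

KGDAL2CV gdal2cv;
cv::Mat img(512, 512, CV_16UC1);
// true when the GDAL band type maps onto the Mat depth (GDT_UInt16 -> CV_16U)
bool matched = gdal2cv.CheckDataType(GDT_UInt16, img);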
Example #6
static bool ocl_pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    if (cn > 4 || (depth == CV_64F && !doubleSupport))
        return false;

    Size ssize = _src.size();
    Size dsize = _dsz.area() == 0 ? Size((ssize.width + 1) / 2, (ssize.height + 1) / 2) : _dsz;
    if (dsize.height < 2 || dsize.width < 2)
        return false;

    CV_Assert( ssize.width > 0 && ssize.height > 0 &&
            std::abs(dsize.width*2 - ssize.width) <= 2 &&
            std::abs(dsize.height*2 - ssize.height) <= 2 );

    UMat src = _src.getUMat();
    _dst.create( dsize, src.type() );
    UMat dst = _dst.getUMat();

    int float_depth = depth == CV_64F ? CV_64F : CV_32F;
    const int local_size = 256;
    int kercn = 1;
    if (depth == CV_8U && float_depth == CV_32F && cn == 1 && ocl::Device::getDefault().isIntel())
        kercn = 4;
    const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP",
                                       "BORDER_REFLECT_101" };
    char cvt[2][50];
    String buildOptions = format(
            "-D T=%s -D FT=%s -D convertToT=%s -D convertToFT=%s%s "
            "-D T1=%s -D cn=%d -D kercn=%d -D fdepth=%d -D %s -D LOCAL_SIZE=%d",
            ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, cn)),
            ocl::convertTypeStr(float_depth, depth, cn, cvt[0]),
            ocl::convertTypeStr(depth, float_depth, cn, cvt[1]),
            doubleSupport ? " -D DOUBLE_SUPPORT" : "", ocl::typeToStr(depth),
            cn, kercn, float_depth, borderMap[borderType], local_size
    );
    ocl::Kernel k("pyrDown", ocl::imgproc::pyr_down_oclsrc, buildOptions);
    if (k.empty())
        return false;

    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst));

    size_t localThreads[2]  = { (size_t)local_size / kercn, 1 };
    size_t globalThreads[2] = { ((size_t)src.cols + kercn - 1) / kercn, ((size_t)dst.rows + 1) / 2 };
    return k.run(2, globalThreads, localThreads, false);
}
Example #7
std::vector<int> types(int depth_start, int depth_end, int cn_start, int cn_end)
{
    std::vector<int> v;

    v.reserve((depth_end - depth_start + 1) * (cn_end - cn_start + 1));

    for (int depth = depth_start; depth <= depth_end; ++depth)
    {
        for (int cn = cn_start; cn <= cn_end; ++cn)
        {
            v.push_back(CV_MAKETYPE(depth, cn));
        }
    }

    return v;
}
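For example, this builds the twelve type codes spanning depths CV_8U through CV_16U with one to four channels each:

// { CV_8UC1 ... CV_8UC4, CV_8SC1 ... CV_8SC4, CV_16UC1 ... CV_16UC4 }
std::vector<int> ts = types(CV_8U, CV_16U, 1, 4);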
Example #8
  void FilterBase::apply(cv::InputArray _src, cv::OutputArray _dst, const int &ddepth){
    int stype = _src.type();
    int dcn = _src.channels();
    int depth = CV_MAT_DEPTH(stype);

    if (0 <= ddepth)
      depth = ddepth;

    Mat src, dst;
    src = _src.getMat();

    Size sz = src.size();

    _dst.create(sz, CV_MAKETYPE(depth, dcn));
    dst = _dst.getMat();

    int imageWidth = src.cols;
    int imageHeight = src.rows;

    Mat srcChannels[3];
    split(src, srcChannels);

    int margineWidth = kernel.cols / 2;
    int margineHeight = kernel.rows / 2;
    double kernelElemCount = (double)(kernel.cols * kernel.rows);

    // note: writing through Vec3d assumes a 3-channel double destination
    for (int ch = 0; ch < dcn; ++ch) {
      for (int y = 0; y < imageHeight; ++y) {
        Vec3d *ptr = dst.ptr<Vec3d>(y);
        for (int x = 0; x < imageWidth; ++x) {
          if (isEdge(x, y, imageWidth, imageHeight, margineWidth, margineHeight)) {
            ptr[x][ch] = calcKernelOutputAtEdge(srcChannels[ch],
                                                kernel, x, y,
                                                imageWidth, imageHeight,
                                                margineWidth, margineHeight);
          } else {
            ptr[x][ch] = calcKernelOutput(srcChannels[ch],
                                          kernel, x, y,
                                          margineWidth, margineHeight,
                                          kernelElemCount);
          }
        }
      }
    }
  }
Example #9
MxArray::MxArray(const cv::Mat& mat, mxClassID classid, bool transpose)
{
    // handle special case of empty input Mat by creating an empty array
    classid = (classid == mxUNKNOWN_CLASS) ? ClassIDOf[mat.depth()] : classid;
    if (mat.empty()) {
        p_ = mxCreateNumericArray(0, 0, classid, mxREAL);
        if (!p_)
            mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
        return;
    }
    // transpose cv::Mat if needed
    cv::Mat input(mat);
    if (input.dims == 2 && transpose)
        input = input.t();
    // Create a new mxArray (of the specified classID) equivalent to cv::Mat
    const mwSize nchannels = input.channels();
    const int* dims_ = input.size;
    std::vector<mwSize> d(dims_, dims_ + input.dims);
    d.push_back(nchannels);
    std::swap(d[0], d[1]);
    if (classid == mxLOGICAL_CLASS) {
        // OpenCV's logical true is any nonzero, while MATLAB's true is 1.
        cv::compare(input, 0, input, cv::CMP_NE);
        input.setTo(1, input);
        p_ = mxCreateLogicalArray(d.size(), &d[0]);
    }
    else
        p_ = mxCreateNumericArray(d.size(), &d[0], classid, mxREAL);
    if (!p_)
        mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
    // split input cv::Mat into several single-channel arrays
    std::vector<cv::Mat> channels;
    channels.reserve(nchannels);
    cv::split(input, channels);
    // Copy each channel from Mat to mxArray (converting to specified classid),
    // as in: p_(:,:,i) <- cast_to_classid_type(channels[i])
    std::vector<mwSize> si(d.size(), 0);               // subscript index
    const int type = CV_MAKETYPE(DepthOf[classid], 1); // destination type
    for (mwIndex i = 0; i < nchannels; ++i) {
        si[si.size() - 1] = i;                   // last dim is a channel index
        void *ptr = reinterpret_cast<void*>(
            reinterpret_cast<size_t>(mxGetData(p_)) +
            mxGetElementSize(p_) * subs(si));    // ptr to i-th channel data
        cv::Mat m(input.dims, dims_, type, ptr); // only creates Mat header
        channels[i].convertTo(m, type);          // Write to mxArray through m
    }
}
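A sketch of how this constructor is typically reached from a MEX entry point (the body below is illustrative, not part of MxArray):

void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    // MxArray performs the dimension reordering and per-channel copy shown above
    cv::Mat result = cv::Mat::eye(3, 3, CV_64F);
    plhs[0] = MxArray(result);
}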
Example #10
void UMat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
{
    bool noScale = std::fabs(alpha - 1) < DBL_EPSILON && std::fabs(beta) < DBL_EPSILON;
    int stype = type(), cn = CV_MAT_CN(stype);

    if( _type < 0 )
        _type = _dst.fixedType() ? _dst.type() : stype;
    else
        _type = CV_MAKETYPE(CV_MAT_DEPTH(_type), cn);

    int sdepth = CV_MAT_DEPTH(stype), ddepth = CV_MAT_DEPTH(_type);
    if( sdepth == ddepth && noScale )
    {
        copyTo(_dst);
        return;
    }

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    bool needDouble = sdepth == CV_64F || ddepth == CV_64F;
    if( dims <= 2 && cn && _dst.isUMat() && ocl::useOpenCL() &&
            ((needDouble && doubleSupport) || !needDouble) )
    {
        char cvt[40];
        ocl::Kernel k("convertTo", ocl::core::convert_oclsrc,
                      format("-D srcT=%s -D dstT=%s -D convertToDT=%s%s", ocl::typeToStr(sdepth),
                             ocl::typeToStr(ddepth), ocl::convertTypeStr(CV_32F, ddepth, 1, cvt),
                             doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
        if (!k.empty())
        {
            UMat src = *this;
            _dst.create( size(), _type );
            UMat dst = _dst.getUMat();

            float alphaf = (float)alpha, betaf = (float)beta;
            k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst, cn), alphaf, betaf);

            size_t globalsize[2] = { (size_t)dst.cols * cn, (size_t)dst.rows };
            if (k.run(2, globalsize, NULL, false))
                return;
        }
    }

    Mat m = getMat(ACCESS_READ);
    m.convertTo(_dst, _type, alpha, beta);
}
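A typical call that reaches the OpenCL branch above; both operands are UMats and the scale factor rules out the plain copyTo shortcut:

cv::UMat u8(100, 100, CV_8UC1), f32;
u8.convertTo(f32, CV_32F, 1.0 / 255.0);   // runs the "convertTo" kernel when OpenCL is available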
Example #11
MxArray::MxArray(const cv::Mat& mat, mxClassID classid, bool transpose)
{
    if (mat.empty())
    {
        p_ = mxCreateNumericArray(0, 0, mxDOUBLE_CLASS, mxREAL);
        if (!p_)
            mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
        return;
    }
    cv::Mat input = (mat.dims == 2 && transpose) ? mat.t() : mat;
    // Create a new mxArray.
    const int nchannels = input.channels();
    const int* dims_ = input.size;
    std::vector<mwSize> d(dims_, dims_ + input.dims);
    d.push_back(nchannels);
    classid = (classid == mxUNKNOWN_CLASS)
        ? ClassIDOf[input.depth()] : classid;
    std::swap(d[0], d[1]);
    if (classid == mxLOGICAL_CLASS)
    {
        // OpenCV's logical true is any nonzero, while MATLAB's true is 1.
        cv::compare(input, 0, input, cv::CMP_NE);
        input.setTo(1, input);
        p_ = mxCreateLogicalArray(d.size(), &d[0]);
    }
    else {
        p_ = mxCreateNumericArray(d.size(), &d[0], classid, mxREAL);
    }
    if (!p_)
        mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
    // Copy each channel.
    std::vector<cv::Mat> channels;
    split(input, channels);
    std::vector<mwSize> si(d.size(), 0); // subscript index.
    int type = CV_MAKETYPE(DepthOf[classid], 1); // destination type.
    for (int i = 0; i < nchannels; ++i)
    {
        si[si.size() - 1] = i; // last dim is a channel index.
        void *ptr = reinterpret_cast<void*>(
                reinterpret_cast<size_t>(mxGetData(p_)) +
                mxGetElementSize(p_) * subs(si));
        cv::Mat m(input.dims, dims_, type, ptr);
        channels[i].convertTo(m, type); // Write to mxArray through m.
    }
}
Example #12
/**
 * Load an image from a file in the proprietary format.
 * The file name encodes the image configuration as:
 *   *-(width)x(height)x(num_channels)-(opencv_depth_code).bin
 * There is no header in the file; it is a raw binary dump of OpenCV cv::Mat data.
 * For better portability, an existing image format could be used to carry the data instead.
 */
cv::Mat cv_subtractor::read_binary_image_file(const std::string filename) {
  std::vector<int> tokens;
  { // Extract the information on the image from the file name
    const std::vector<char> delims = {'-', 'x','x','-','.'};
    std::string dir;
    std::string basename;

    parse_path(filename, dir, basename);
    tokens = get_tokens(basename, delims);
    if (tokens.size() != delims.size()) {
      return cv::Mat();
    }
  }

  std::ifstream file(filename, std::ios::binary);
  if (!file.good()) {
    return cv::Mat();
  }
  file.unsetf(std::ios::skipws);

  { // Check file size
    const size_t image_byte_size
      = tokens[1] * tokens[2] * tokens[3] * CV_ELEM_SIZE(tokens[4]);

    file.seekg(0, std::ios::end);
    const size_t file_size = static_cast<size_t>(file.tellg());
    if (image_byte_size != file_size) {
      return cv::Mat();
    }
  }

  // Construct an image data structure (rows = height = tokens[2], cols = width = tokens[1])
  cv::Mat image(tokens[2], tokens[1], CV_MAKETYPE(tokens[4], tokens[3]));

  // Reset the file pointer
  file.seekg(0, std::ios::beg);

  // Load the image from the file
  std::copy(std::istream_iterator<unsigned char>(file),
            std::istream_iterator<unsigned char>(),
            reinterpret_cast<unsigned char*>(image.data));

  return image;
}
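A hypothetical companion writer, sketched only to make the filename convention concrete (write_binary_image_file does not exist in the original code and assumes a continuous Mat):

#include <fstream>
#include <sstream>
#include <opencv2/core.hpp>

void write_binary_image_file(const cv::Mat& img, const std::string& prefix)
{
    CV_Assert(img.isContinuous());
    std::ostringstream name;   // *-(width)x(height)x(num_channels)-(opencv_depth_code).bin
    name << prefix << '-' << img.cols << 'x' << img.rows << 'x'
         << img.channels() << '-' << img.depth() << ".bin";
    std::ofstream out(name.str(), std::ios::binary);
    out.write(reinterpret_cast<const char*>(img.data), img.total() * img.elemSize());
}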
Example #13
void cv::Scharr( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
                 double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;
    int dtype = CV_MAKETYPE(ddepth, cn);
    _dst.create( _src.size(), dtype );

#ifdef HAVE_TEGRA_OPTIMIZATION
    if (tegra::useTegra() && scale == 1.0 && delta == 0)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (tegra::scharr(src, dst, dx, dy, borderType))
            return;
    }
#endif

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
    CV_IPP_CHECK()
    {
        if (IPPDerivScharr(_src, _dst, ddepth, dx, dy, scale, delta, borderType))
        {
            CV_IMPL_ADD(CV_IMPL_IPP);
            return;
        }
    }
#endif
    int ktype = std::max(CV_32F, std::max(ddepth, sdepth));

    Mat kx, ky;
    getScharrKernels( kx, ky, dx, dy, false, ktype );
    if( scale != 1 )
    {
        // usually the smoothing part is the slowest to compute,
        // so try to scale it instead of the faster differentiating part
        if( dx == 0 )
            kx *= scale;
        else
            ky *= scale;
    }
    sepFilter2D( _src, _dst, ddepth, kx, ky, Point(-1, -1), delta, borderType );
}
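Typical usage, with a 16-bit destination so the 8-bit source cannot overflow (a standard pattern, not taken from this file):

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
cv::Mat gx, gy;
cv::Scharr(gray, gx, CV_16S, 1, 0);   // d/dx with the 3x3 Scharr kernel
cv::Scharr(gray, gy, CV_16S, 0, 1);   // d/dy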
Example #14
cv::Mat cv::viz::vtkTrajectorySource::ExtractPoints(InputArray _traj)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);
    CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

    Mat points(1, (int)_traj.total(), CV_MAKETYPE(_traj.depth(), 3));
    const Affine3d* dpath = _traj.getMat().ptr<Affine3d>();
    const Affine3f* fpath = _traj.getMat().ptr<Affine3f>();

    if (_traj.depth() == CV_32F)
        for(int i = 0; i < points.cols; ++i)
            points.at<Vec3f>(i) = fpath[i].translation();

    if (_traj.depth() == CV_64F)
        for(int i = 0; i < points.cols; ++i)
            points.at<Vec3d>(i) = dpath[i].translation();

    return points;
}
Example #15
 GPUSender(sensor_msgs::ImageConstPtr example, std::string encoding,
         ros::Publisher* pub) :
     image_msg_(new sensor_msgs::Image()),
     publisher_(pub)
 {
     int bitdepth, channels;
     bitdepth = sensor_msgs::image_encodings::bitDepth(encoding);
     channels = sensor_msgs::image_encodings::numChannels(encoding);
     image_msg_->header = example->header;
     image_msg_->height = example->height;
     image_msg_->width = example->width;
     image_msg_->encoding = encoding;
     image_msg_->step = example->width*bitdepth*channels/8;
     image_msg_->data.resize( image_msg_->height * image_msg_->step );
     image_data_ = cv::Mat(image_msg_->height, image_msg_->width,
             CV_MAKETYPE(CV_8U, channels),
             &image_msg_->data[0], image_msg_->step);
     cv::cuda::registerPageLocked(image_data_);
 }
Example #16
void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
                int ksize, double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;
    _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );

#ifdef HAVE_TEGRA_OPTIMIZATION
    if (scale == 1.0 && delta == 0)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (ksize == 3 && tegra::sobel3x3(src, dst, dx, dy, borderType))
            return;
        if (ksize == -1 && tegra::scharr(src, dst, dx, dy, borderType))
            return;
    }
#endif

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
    if(dx < 3 && dy < 3 && cn == 1 && borderType == BORDER_REPLICATE)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (IPPDeriv(src, dst, ddepth, dx, dy, ksize,scale))
            return;
    }
#endif
    int ktype = std::max(CV_32F, std::max(ddepth, sdepth));

    Mat kx, ky;
    getDerivKernels( kx, ky, dx, dy, ksize, false, ktype );
    if( scale != 1 )
    {
        // usually the smoothing part is the slowest to compute,
        // so try to scale it instead of the faster differentiating part
        if( dx == 0 )
            kx *= scale;
        else
            ky *= scale;
    }
    sepFilter2D( _src, _dst, ddepth, kx, ky, Point(-1, -1), delta, borderType );
}
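Note the ksize == -1 branch above: -1 is CV_SCHARR, which makes getDerivKernels hand back the 3x3 Scharr kernels, so Sobel and Scharr share the same sepFilter2D tail. Reusing gray from the Scharr sketch earlier:

cv::Mat dx;
cv::Sobel(gray, dx, CV_16S, 1, 0, -1 /* CV_SCHARR */);   // same result as cv::Scharr(gray, dx, CV_16S, 1, 0)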
Example #17
static bool ocl_pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), channels = CV_MAT_CN(type);

    if (channels > 4 || borderType != BORDER_DEFAULT)
        return false;

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    if (depth == CV_64F && !doubleSupport)
        return false;

    Size ssize = _src.size();
    Size dsize = _dsz.area() == 0 ? Size((ssize.width + 1) / 2, (ssize.height + 1) / 2) : _dsz;
    CV_Assert( ssize.width > 0 && ssize.height > 0 &&
            std::abs(dsize.width*2 - ssize.width) <= 2 &&
            std::abs(dsize.height*2 - ssize.height) <= 2 );

    UMat src = _src.getUMat();
    _dst.create( dsize, src.type() );
    UMat dst = _dst.getUMat();

    int float_depth = depth == CV_64F ? CV_64F : CV_32F;
    char cvt[2][50];
    String buildOptions = format(
            "-D T=%s -D FT=%s -D convertToT=%s -D convertToFT=%s%s "
            "-D T1=%s -D cn=%d",
            ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, channels)),
            ocl::convertTypeStr(float_depth, depth, channels, cvt[0]),
            ocl::convertTypeStr(depth, float_depth, channels, cvt[1]),
            doubleSupport ? " -D DOUBLE_SUPPORT" : "",
            ocl::typeToStr(depth), channels
    );
    ocl::Kernel k("pyrDown", ocl::imgproc::pyr_down_oclsrc, buildOptions);
    if (k.empty())
        return false;

    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst));

    size_t localThreads[2]  = { 256, 1 };
    size_t globalThreads[2] = { (size_t)src.cols, (size_t)dst.rows };
    return k.run(2, globalThreads, localThreads, false);
}
Example #18
    void merge(const GpuMat* src, size_t n, GpuMat& dst, const cudaStream_t& stream)
    {
        CV_Assert(src);
        CV_Assert(n > 0);
       
        bool double_ok = TargetArchs::builtWith(NATIVE_DOUBLE) && 
                         DeviceInfo().supports(NATIVE_DOUBLE);
        CV_Assert(src[0].depth() != CV_64F || double_ok);

        int depth = src[0].depth();
        Size size = src[0].size();

        bool single_channel_only = true;
        int total_channels = 0;

        for (size_t i = 0; i < n; ++i)
        {
            CV_Assert(src[i].size() == size);
            CV_Assert(src[i].depth() == depth);
            single_channel_only = single_channel_only && src[i].channels() == 1;
            total_channels += src[i].channels();
        }

        CV_Assert(single_channel_only);
        CV_Assert(total_channels <= 4);

        if (total_channels == 1)  
            src[0].copyTo(dst);
        else 
        {
            dst.create(size, CV_MAKETYPE(depth, total_channels));

            DevMem2D src_as_devmem[4];
            for(size_t i = 0; i < n; ++i)
                src_as_devmem[i] = src[i];

            DevMem2D dst_as_devmem(dst);
            split_merge::merge_caller(src_as_devmem, dst_as_devmem,
                                      total_channels, CV_ELEM_SIZE(depth),
                                      stream);
        }   
    }
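This overload is the internal worker behind the public cv::gpu::merge of the old 2.x CUDA module; a hedged usage sketch of the public wrapper:

cv::Size size(640, 480);
cv::gpu::GpuMat b(size, CV_8UC1), g(size, CV_8UC1), r(size, CV_8UC1);
cv::gpu::GpuMat channels[] = { b, g, r };
cv::gpu::GpuMat bgr;
cv::gpu::merge(channels, 3, bgr);   // bgr becomes CV_8UC3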
Example #19
            static void split(const oclMat &mat_src, oclMat *mat_dst)
            {
                CV_Assert(mat_dst);

                int depth = mat_src.depth();
                int num_channels = mat_src.oclchannels();
                Size size = mat_src.size();

                if(num_channels == 1)
                {
                    mat_src.copyTo(mat_dst[0]);
                    return;
                }

                int i;
                for(i = 0; i < num_channels; i++)
                    mat_dst[i].create(size, CV_MAKETYPE(depth, 1));

                split_vector_run(mat_src, mat_dst);
            }
Example #20
/*
 * call-seq:
 *   query -> IplImage or nil
 *
 * Grabs and returns a frame from a camera or file. Just a combination of grab and retrieve in one call.
 */
VALUE
rb_query(VALUE self)
{
  VALUE image = Qnil;
  try {
    IplImage *frame = cvQueryFrame(CVCAPTURE(self));
    if (!frame)
      return Qnil;
    image = cIplImage::new_object(cvSize(frame->width, frame->height),
				  CV_MAKETYPE(CV_8U, frame->nChannels));
    if (frame->origin == IPL_ORIGIN_TL)
      cvCopy(frame, CVARR(image));
    else
      cvFlip(frame, CVARR(image));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return image;
}
Example #21
void ScaleTransformer::Apply(size_t id, cv::Mat &mat)
{
    UNUSED(id);

    // If matrix has not been converted to the right type, do it now as rescaling
    // requires floating point type.
    if (mat.type() != CV_MAKETYPE(m_imageElementType, m_imgChannels))
    {
        mat.convertTo(mat, m_imageElementType);
    }

    auto seed = GetSeed();
    auto rng = m_rngs.pop_or_create([seed]() { return std::make_unique<std::mt19937>(seed); });

    assert(m_interp.size() > 0);
    auto index = UniIntT(0, static_cast<int>(m_interp.size()) - 1)(*rng);
    cv::resize(mat, mat, cv::Size((int)m_imgWidth, (int)m_imgHeight), 0, 0, m_interp[index]);

    m_rngs.push(std::move(rng));
}
Example #22
CCV::CCV(Texture* t, int numColors, int coherenceThreshold)
{
	this->m_numColors	 	= numColors;
	this->m_coherenceThreshold 	= coherenceThreshold;
	this->m_numPix 			= t->m_width * t->m_height;

	//convert texture to cv::Mat
	//(note: CV_MAKETYPE expects an OpenCV depth code as its first argument,
	//so passing a bit count only maps onto CV_8U for 1-byte channels)
	cv::Mat img(cv::Size(t->m_width, t->m_height), CV_MAKETYPE(t->m_numBytesPerChan * 8, t->m_numChannels), t->m_data);

	//split the image into its r, g and b channels
	cv::Mat img_planes[3];
	cv::split(img, img_planes);	
	
	//calculate the CCVs
	m_CCV_r = calculateCCV(img_planes[0]);
	m_CCV_g = calculateCCV(img_planes[1]);
	m_CCV_b = calculateCCV(img_planes[2]);
}
Example #23
static void _prepareImgAndDrawKeypoints( const Mat& img1, const vector<KeyPoint>& keypoints1,
                                         const Mat& img2, const vector<KeyPoint>& keypoints2,
                                         Mat& outImg, Mat& outImg1, Mat& outImg2,
                                         const Scalar& singlePointColor, int flags )
{
    Size size( img1.cols + img2.cols, MAX(img1.rows, img2.rows) );
    if( flags & DrawMatchesFlags::DRAW_OVER_OUTIMG )
    {
        if( size.width > outImg.cols || size.height > outImg.rows )
            CV_Error( CV_StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
        outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
    }
    else
    {
        outImg.create( size, CV_MAKETYPE(img1.depth(), 3) );
        outImg = Scalar::all(0);
        outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );

        if( img1.type() == CV_8U )
            cvtColor( img1, outImg1, CV_GRAY2BGR );
        else
            img1.copyTo( outImg1 );

        if( img2.type() == CV_8U )
            cvtColor( img2, outImg2, CV_GRAY2BGR );
        else
            img2.copyTo( outImg2 );
    }

    // draw keypoints
    if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
    {
        Mat _outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );

        Mat _outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
        drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
    }
}
Example #24
DLLEXP void DispCvArrMultiChannel( CvArr *a, char *varName /*= VAR_NAME(varName)*/ )
{
	CvMat tmpHeader, *m;
	m = cvGetMat(a, &tmpHeader);
	int cnNum = CV_MAT_CN(m->type), depth = CV_MAT_DEPTH(m->type);
	CvMat **p = new CvMat *[4];
	for (int i = 0; i < 4; i++)
		p[i] = ( i >= cnNum ? NULL : 
		cvCreateMat(m->rows, m->cols, CV_MAKETYPE(depth, 1)) );
	cvSplit(m, p[0], p[1], p[2], p[3]);
	for (int i = 0; i < cnNum; i++)
	{
		CString str;
		str.Format("%s : channel %d", varName, i);
		DispCvArr(p[i], str.GetBuffer());
	}

	for (int i = 0; i < cnNum; i++)
		cvReleaseMat(&p[i]);
	delete []p;
}
Example #25
/*! Get new frame
	\return <true> if video capture object is opened
 */
bool 
ofxCv2FrameGrabber::getNewFrame()
{
	if (!cap.isOpened()) { return false; }
	
	Mat frame;
	cap >> frame;
	
	if (frame.cols != cvImage.cols || 
		frame.rows != cvImage.rows ||
		frame.channels() != cvImage.channels() ||
		frame.depth() != cvImage.depth())
	{
		cvImage.create(frame.rows, frame.cols, CV_MAKETYPE(frame.depth(), frame.channels()));
	}
	cvtColor(frame, cvImage, CV_BGR2RGB);
	
	textureIsDirty = true;
	
	return true;
}
Example #26
File: draw.cpp  Project: JoeHowse/opencv
static void _prepareImgAndDrawKeypoints( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                                         InputArray img2, const std::vector<KeyPoint>& keypoints2,
                                         InputOutputArray _outImg, Mat& outImg1, Mat& outImg2,
                                         const Scalar& singlePointColor, DrawMatchesFlags flags )
{
    Mat outImg;
    Size img1size = img1.size(), img2size = img2.size();
    Size size( img1size.width + img2size.width, MAX(img1size.height, img2size.height) );
    if( !!(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
    {
        outImg = _outImg.getMat();
        if( size.width > outImg.cols || size.height > outImg.rows )
            CV_Error( Error::StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
        outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
    }
    else
    {
        const int cn1 = img1.channels(), cn2 = img2.channels();
        const int out_cn = std::max(3, std::max(cn1, cn2));
        _outImg.create(size, CV_MAKETYPE(img1.depth(), out_cn));
        outImg = _outImg.getMat();
        outImg = Scalar::all(0);
        outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );

        _prepareImage(img1, outImg1);
        _prepareImage(img2, outImg2);
    }

    // draw keypoints
    if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
    {
        Mat _outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );

        Mat _outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
        drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
    }
}
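This helper is what cv::drawMatches uses to lay the two images side by side before drawing the match lines; a typical top-level call looks like this (the inputs are assumed to come from a feature detector and matcher):

// img1/img2 are cv::Mat, keypoints1/keypoints2 are std::vector<cv::KeyPoint>,
// matches is std::vector<cv::DMatch>
cv::Mat visualization;
cv::drawMatches(img1, keypoints1, img2, keypoints2, matches, visualization);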
Example #27
File: main.cpp  Project: caomw/lime
void run_still_image(const std::string& filename) {
    cv::Mat img = cv::imread(filename, cv::IMREAD_COLOR);
    if (img.empty()) {
        std::cout << "Failed to load image \"" << filename << "\"." << std::endl;
        return;
    }

    int width = img.cols;
    int height = img.rows;
    int dim = img.channels();
    img.convertTo(img, CV_32F, 1.0 / 255.0);

    cv::Mat gray, out;
    std::vector<cv::Point2f> points;
    cv::Mat noise;

    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    lime::npr::poissonDisk(gray, &points, lime::npr::PDS_RAND_QUEUE, 1.0, 4.0);
    pencilDrawing(img, out, points);

    cv::namedWindow("Input");
    cv::namedWindow("Output");
    cv::imshow("Input", img);
    cv::imshow("Output", out);
    cv::waitKey(0);
    cv::destroyAllWindows();

    if (save_result) {
        out.convertTo(out, CV_MAKETYPE(CV_8U, dim), 255.0);

        std::string base = basename(filename);
        std::string fileout = base + "_out.png";
        std::string filenoise = base + "_noise.png";
        cv::imwrite(fileout, out);

        // noise is never populated above; imwrite would throw on an empty Mat
        if (!noise.empty()) {
            noise.convertTo(noise, CV_8UC3, 255.0);
            cv::imwrite(filenoise, noise);
        }
        std::cout << "Results are saved!" << std::endl;
    }
}
Example #28
/*
 * Grabs, decodes and returns the next video frame.
 * @overload query
 * @return [IplImage] Next video frame
 * @return [nil] Failed to read next video frame
 * @opencv_func cvQueryFrame
 */
VALUE
rb_query(VALUE self)
{
  VALUE image = Qnil;
  IplImage *frame = NULL;
  try {
    if (!(frame = cvQueryFrame(CVCAPTURE(self)))) {
      return Qnil;
    }
    image = cIplImage::new_object(frame->width, frame->height,
				  CV_MAKETYPE(IPL2CV_DEPTH(frame->depth), frame->nChannels));
    if (frame->origin == IPL_ORIGIN_TL) {
      cvCopy(frame, CVARR(image));
    }
    else {
      cvFlip(frame, CVARR(image));
    }
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return image;
}
Example #29
File: deriv.cpp  Project: 2693/opencv
void cv::Scharr( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
                 double scale, double delta, int borderType )
{
    Mat src = _src.getMat();
    if (ddepth < 0)
        ddepth = src.depth();
    _dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) );
    Mat dst = _dst.getMat();

#ifdef HAVE_TEGRA_OPTIMIZATION
    if (scale == 1.0 && delta == 0)
        if (tegra::scharr(src, dst, dx, dy, borderType))
            return;
#endif

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
    if(dx < 2 && dy < 2 && src.channels() == 1 && borderType == 1)
    {
        if(IPPDerivScharr(src, dst, ddepth, dx, dy, scale))
            return;
    }
#endif
    int ktype = std::max(CV_32F, std::max(ddepth, src.depth()));

    Mat kx, ky;
    getScharrKernels( kx, ky, dx, dy, false, ktype );
    if( scale != 1 )
    {
        // usually the smoothing part is the slowest to compute,
        // so try to scale it instead of the faster differentiating part
        if( dx == 0 )
            kx *= scale;
        else
            ky *= scale;
    }
    sepFilter2D( src, dst, ddepth, kx, ky, Point(-1,-1), delta, borderType );
}
Example #30
void testproject::GSstreamImages(cv::Mat& leftImage,cv::Mat& rightImage){ //HL changed Img to Image
	Mat doubleImg;
	Mat combined_img;
	Mat upsidedown_img;

	if (videostreaming.videoStreamingOn)
	{
		videostreaming.update_MatVideoBuffer(doubleVideoBuffer, doubleImg);

		Size size( leftImage.cols + rightImage.cols, leftImage.rows );

		//place two images side by side
		combined_img.create( size, CV_MAKETYPE(leftImage.depth(), 3) );
		Mat imgLeft = combined_img( Rect(0, 0, leftImage.cols, leftImage.rows) );
		Mat imgRight = combined_img( Rect(leftImage.cols, 0, rightImage.cols, rightImage.rows) );

		cvtColor( leftImage, imgLeft, CV_GRAY2BGR );
		cvtColor( rightImage, imgRight, CV_GRAY2BGR );

		Mat smaller;

		pyrDown(combined_img, smaller, Size(combined_img.cols/2, combined_img.rows/2));

		flip(smaller, upsidedown_img, 0);

		if (toggle < 5) {
			videostreaming.setAsDefaultVideoMode(combinedVideoBuffer);
		} 
		else {
			videostreaming.setAsDefaultVideoMode();
		}
		toggle = toggle == 10 ? 0 : toggle+1;

		videostreaming.update_MatVideoBuffer(combinedVideoBuffer, smaller);
	}
}