Example #1
void setSize( Mat& m, int _dims, const int* _sz, const size_t* _steps, bool autoSteps)
{
    CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM );
    if( m.dims != _dims )
    {
        if( m.step.p != m.step.buf )
        {
            fastFree(m.step.p);
            m.step.p = m.step.buf;
            m.size.p = &m.rows;
        }
        if( _dims > 2 )
        {
            m.step.p = (size_t*)fastMalloc(_dims*sizeof(m.step.p[0]) + (_dims+1)*sizeof(m.size.p[0]));
            m.size.p = (int*)(m.step.p + _dims) + 1;
            m.size.p[-1] = _dims;
            m.rows = m.cols = -1;
        }
    }

    m.dims = _dims;
    if( !_sz )
        return;

    size_t esz = CV_ELEM_SIZE(m.flags), esz1 = CV_ELEM_SIZE1(m.flags), total = esz;
    for( int i = _dims-1; i >= 0; i-- )
    {
        int s = _sz[i];
        CV_Assert( s >= 0 );
        m.size.p[i] = s;

        if( _steps )
        {
            if (_steps[i] % esz1 != 0)
            {
                CV_Error(Error::BadStep, "Step must be a multiple of esz1");
            }

            m.step.p[i] = i < _dims-1 ? _steps[i] : esz;
        }
        else if( autoSteps )
        {
            m.step.p[i] = total;
            int64 total1 = (int64)total*s;
            if( (uint64)total1 != (size_t)total1 )
                CV_Error( CV_StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" );
            total = (size_t)total1;
        }
    }

    if( _dims == 1 )
    {
        m.dims = 2;
        m.cols = 1;
        m.step[1] = esz;
    }
}
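For reference, a minimal sketch (not part of the snippet above) of how the auto-step layout computed by setSize() can be observed through the public cv::Mat API; the n-dimensional Mat constructor reaches setSize() with _steps == nullptr and autoSteps == true:

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    // Dense 3-D matrix: steps are products of the trailing dimension
    // sizes times the element size, computed back to front as above.
    int sz[] = { 2, 3, 4 };
    cv::Mat m(3, sz, CV_8UC1);

    // For CV_8UC1 (esz == 1) the expected steps are 12, 4 and 1 bytes.
    for (int i = 0; i < m.dims; i++)
        std::printf("step[%d] = %zu\n", i, (size_t)m.step[i]);
    return 0;
}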
Example #2
UMat& UMat::setTo(InputArray _value, InputArray _mask)
{
    bool haveMask = !_mask.empty();
#ifdef HAVE_OPENCL
    int tp = type(), cn = CV_MAT_CN(tp);

    if( dims <= 2 && cn <= 4 && CV_MAT_DEPTH(tp) < CV_64F && ocl::useOpenCL() )
    {
        Mat value = _value.getMat();
        CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::UMAT) );
        double buf[4] = { 0, 0, 0, 0 };
        convertAndUnrollScalar(value, tp, (uchar *)buf, 1);

        int scalarcn = cn == 3 ? 4 : cn, rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
        String opts = format("-D dstT=%s -D rowsPerWI=%d -D dstST=%s -D dstT1=%s -D cn=%d",
                             ocl::memopTypeToStr(tp), rowsPerWI,
                             ocl::memopTypeToStr(CV_MAKETYPE(tp, scalarcn)),
                             ocl::memopTypeToStr(CV_MAT_DEPTH(tp)), cn);

        ocl::Kernel setK(haveMask ? "setMask" : "set", ocl::core::copyset_oclsrc, opts);
        if( !setK.empty() )
        {
            ocl::KernelArg scalararg(0, 0, 0, 0, buf, CV_ELEM_SIZE1(tp) * scalarcn);
            UMat mask;

            if( haveMask )
            {
                mask = _mask.getUMat();
                CV_Assert( mask.size() == size() && mask.type() == CV_8UC1 );
                ocl::KernelArg maskarg = ocl::KernelArg::ReadOnlyNoSize(mask),
                        dstarg = ocl::KernelArg::ReadWrite(*this);
                setK.args(maskarg, dstarg, scalararg);
            }
            else
            {
                ocl::KernelArg dstarg = ocl::KernelArg::WriteOnly(*this);
                setK.args(dstarg, scalararg);
            }

            size_t globalsize[] = { (size_t)cols, ((size_t)rows + rowsPerWI - 1) / rowsPerWI };
            if( setK.run(2, globalsize, NULL, false) )
                return *this;
        }
    }
#endif
    Mat m = getMat(haveMask ? ACCESS_RW : ACCESS_WRITE);
    m.setTo(_value, _mask);
    return *this;
}
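A brief usage sketch (an addition, not taken from the file above): calling setTo() on a UMat with and without a mask. Whether the OpenCL kernel path or the Mat fallback at the end of the method runs depends on the build and on the available device:

#include <opencv2/core.hpp>

int main()
{
    cv::UMat img(480, 640, CV_8UC3, cv::Scalar::all(0));

    // Fill the whole image with a constant color.
    img.setTo(cv::Scalar(255, 0, 0));

    // Fill only the pixels selected by an 8-bit single-channel mask.
    cv::UMat mask(img.size(), CV_8UC1, cv::Scalar(0));
    mask(cv::Rect(0, 0, 100, 100)).setTo(cv::Scalar(255));
    img.setTo(cv::Scalar(0, 0, 255), mask);
    return 0;
}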
Example #3
  pstable_l2_func(int _d, int _k, double _r, CvRNG& rng)
    : d(_d), k(_k), r(_r) {
    assert(sizeof(T) == CV_ELEM_SIZE1(cvtype));
    a = cvCreateMat(k, d, cvtype);
    b = cvCreateMat(k, 1, cvtype);
    r1 = cvCreateMat(k, 1, CV_32SC1);
    r2 = cvCreateMat(k, 1, CV_32SC1);
    cvRandArr(&rng, a, CV_RAND_NORMAL, cvScalar(0), cvScalar(1));
    cvRandArr(&rng, b, CV_RAND_UNI, cvScalar(0), cvScalar(r));
    cvRandArr(&rng, r1, CV_RAND_UNI,
	      cvScalar(std::numeric_limits<int>::min()),
	      cvScalar(std::numeric_limits<int>::max()));
    cvRandArr(&rng, r2, CV_RAND_UNI,
	      cvScalar(std::numeric_limits<int>::min()),
	      cvScalar(std::numeric_limits<int>::max()));
  }
Example #4
inline
size_t HostMem::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}
Example #5
inline
size_t GpuMat::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}
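Examples #4 and #5 (like the accessor in Example #7 below) are thin wrappers that apply CV_ELEM_SIZE1() to the stored type flags. A small illustrative sketch (not from the original sources): the macro depends only on the depth of the type, never on the channel count:

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    // One byte per channel element for 8-bit types, regardless of cn.
    std::printf("%d %d %d\n",
                CV_ELEM_SIZE1(CV_8UC3),   // 1
                CV_ELEM_SIZE1(CV_16UC1),  // 2
                CV_ELEM_SIZE1(CV_64FC2)); // 8
    return 0;
}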
Example #6
bool  PxMDecoder::readData( Mat& img )
{
    int color = img.channels() > 1;
    uchar* data = img.data;
    int step = (int)img.step;
    PaletteEntry palette[256];
    bool   result = false;
    int  bit_depth = CV_ELEM_SIZE1(m_type)*8;
    int  src_pitch = (m_width*m_bpp*bit_depth/8 + 7)/8;
    int  nch = CV_MAT_CN(m_type);
    int  width3 = m_width*nch;
    int  i, x, y;

    if( m_offset < 0 || !m_strm.isOpened())
        return false;

    AutoBuffer<uchar> _src(src_pitch + 32);
    uchar* src = _src;
    AutoBuffer<uchar> _gray_palette;
    uchar* gray_palette = _gray_palette;

    // create LUT for converting colors
    if( bit_depth == 8 )
    {
        _gray_palette.allocate(m_maxval + 1);
        gray_palette = _gray_palette;

        for( i = 0; i <= m_maxval; i++ )
            gray_palette[i] = (uchar)((i*255/m_maxval)^(m_bpp == 1 ? 255 : 0));

        FillGrayPalette( palette, m_bpp==1 ? 1 : 8 , m_bpp == 1 );
    }

    try
    {
        m_strm.setPos( m_offset );

        switch( m_bpp )
        {
        ////////////////////////// 1 BPP /////////////////////////
        case 1:
            if( !m_binary )
            {
                for( y = 0; y < m_height; y++, data += step )
                {
                    for( x = 0; x < m_width; x++ )
                        src[x] = ReadNumber( m_strm, 1 ) != 0;

                    if( color )
                        FillColorRow8( data, src, m_width, palette );
                    else
                        FillGrayRow8( data, src, m_width, gray_palette );
                }
            }
            else
            {
                for( y = 0; y < m_height; y++, data += step )
                {
                    m_strm.getBytes( src, src_pitch );

                    if( color )
                        FillColorRow1( data, src, m_width, palette );
                    else
                        FillGrayRow1( data, src, m_width, gray_palette );
                }
            }
            result = true;
            break;

        ////////////////////////// 8 BPP /////////////////////////
        case 8:
        case 24:
            for( y = 0; y < m_height; y++, data += step )
            {
                if( !m_binary )
                {
                    for( x = 0; x < width3; x++ )
                    {
                        int code = ReadNumber( m_strm, INT_MAX );
                        if( (unsigned)code > (unsigned)m_maxval ) code = m_maxval;
                        if( bit_depth == 8 )
                            src[x] = gray_palette[code];
                        else
                            ((ushort *)src)[x] = (ushort)code;
                    }
                }
                else
                {
                    m_strm.getBytes( src, src_pitch );
                    if( bit_depth == 16 && !isBigEndian() )
                    {
                        for( x = 0; x < width3; x++ )
                        {
                            uchar v = src[x * 2];
                            src[x * 2] = src[x * 2 + 1];
                            src[x * 2 + 1] = v;
                        }
                    }
                }

                if( img.depth() == CV_8U && bit_depth == 16 )
                {
                    for( x = 0; x < width3; x++ )
                    {
                        int v = ((ushort *)src)[x];
                        src[x] = (uchar)(v >> 8);
                    }
                }

                if( m_bpp == 8 ) // image has one channel
                {
                    if( color )
                    {
                        if( img.depth() == CV_8U ) {
                            uchar *d = data, *s = src, *end = src + m_width;
                            for( ; s < end; d += 3, s++)
                                d[0] = d[1] = d[2] = *s;
                        } else {
                            ushort *d = (ushort *)data, *s = (ushort *)src, *end = ((ushort *)src) + m_width;
                            for( ; s < end; s++, d += 3)
                                d[0] = d[1] = d[2] = *s;
                        }
                    }
                    else
                        memcpy( data, src, m_width*(bit_depth/8) );
                }
                else
                {
                    if( color )
                    {
                        if( img.depth() == CV_8U )
                            icvCvt_RGB2BGR_8u_C3R( src, 0, data, 0, cvSize(m_width,1) );
                        else
                            icvCvt_RGB2BGR_16u_C3R( (ushort *)src, 0, (ushort *)data, 0, cvSize(m_width,1) );
                    }
                    else if( img.depth() == CV_8U )
                        icvCvt_BGR2Gray_8u_C3C1R( src, 0, data, 0, cvSize(m_width,1), 2 );
                    else
                        icvCvt_BGRA2Gray_16u_CnC1R( (ushort *)src, 0, (ushort *)data, 0, cvSize(m_width,1), 3, 2 );
                }
            }
            result = true;
            break;
        default:
            assert(0);
        }
    }
    catch(...)
    {
    }

    return result;
}
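This decoder is normally reached through cv::imread() rather than called directly. A minimal, hedged usage sketch (the file name "input.pgm" is a placeholder):

#include <opencv2/imgcodecs.hpp>
#include <cstdio>

int main()
{
    // PGM/PPM/PBM files are routed to PxMDecoder by the imgcodecs registry.
    cv::Mat img = cv::imread("input.pgm", cv::IMREAD_UNCHANGED);
    if (img.empty())
    {
        std::printf("failed to load image\n");
        return 1;
    }
    std::printf("%dx%d, %d channel(s), %d byte(s) per channel\n",
                img.cols, img.rows, img.channels(), (int)img.elemSize1());
    return 0;
}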
Example #7
 int elemSize1() const { return CV_ELEM_SIZE1(type_); }
Example #8
	/// @brief Construct a Mat from an NDArray object.
	static void construct(PyObject* object,
			boost::python::converter::rvalue_from_python_stage1_data* data) {
		namespace python = boost::python;
		// Object is a borrowed reference, so create a handle indicating it is
		// borrowed for proper reference counting.
		python::handle<> handle(python::borrowed(object));

		// Obtain a handle to the memory block that the converter has allocated
		// for the C++ type.
		typedef python::converter::rvalue_from_python_storage<Mat> storage_type;
		void* storage = reinterpret_cast<storage_type*>(data)->storage.bytes;

		// Allocate the C++ type into the converter's memory block and assign
		// its handle to the converter's convertible variable.  The cv::Mat is
		// constructed in-place so that it wraps the NumPy array's data buffer
		// instead of copying it.
		PyArrayObject* oarr = (PyArrayObject*) object;

		bool needcopy = false, needcast = false;
		int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
		int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
					typenum == NPY_USHORT ? CV_16U :
					typenum == NPY_SHORT ? CV_16S :
					typenum == NPY_INT ? CV_32S :
					typenum == NPY_INT32 ? CV_32S :
					typenum == NPY_FLOAT ? CV_32F :
					typenum == NPY_DOUBLE ? CV_64F : -1;

		if (type < 0) {
			needcopy = needcast = true;
			new_typenum = NPY_INT;
			type = CV_32S;
		}

#ifndef CV_MAX_DIM
		const int CV_MAX_DIM = 32;
#endif
		int ndims = PyArray_NDIM(oarr);

		int size[CV_MAX_DIM + 1];
		size_t step[CV_MAX_DIM + 1];
		size_t elemsize = CV_ELEM_SIZE1(type);
		const npy_intp* _sizes = PyArray_DIMS(oarr);
		const npy_intp* _strides = PyArray_STRIDES(oarr);
		bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;

		for (int i = ndims - 1; i >= 0 && !needcopy; i--) {
			// these checks handle cases of
			//  a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
			//  b) transposed arrays, where _strides[] elements go in non-descending order
			//  c) flipped arrays, where some of _strides[] elements are negative
			if ((i == ndims - 1 && (size_t) _strides[i] != elemsize)
					|| (i < ndims - 1 && _strides[i] < _strides[i + 1]))
				needcopy = true;
		}

		if (ismultichannel && _strides[1] != (npy_intp) elemsize * _sizes[2])
			needcopy = true;

		if (needcopy) {

			if (needcast) {
				object = PyArray_Cast(oarr, new_typenum);
				oarr = (PyArrayObject*) object;
			} else {
				oarr = PyArray_GETCONTIGUOUS(oarr);
				object = (PyObject*) oarr;
			}

			_strides = PyArray_STRIDES(oarr);
		}

		for (int i = 0; i < ndims; i++) {
			size[i] = (int) _sizes[i];
			step[i] = (size_t) _strides[i];
		}

		// handle degenerate case
		if (ndims == 0) {
			size[ndims] = 1;
			step[ndims] = elemsize;
			ndims++;
		}

		if (ismultichannel) {
			ndims--;
			type |= CV_MAKETYPE(0, size[2]);
		}
		if (!needcopy) {
			Py_INCREF(object);
		}
		cv::Mat* m = new (storage) cv::Mat(ndims, size, type, PyArray_DATA(oarr), step);
		m->u = g_numpyAllocator.allocate(object, ndims, size, type, step);
		m->allocator = &g_numpyAllocator;
		m->addref();
		data->convertible = storage;
	}
Example #9
Mat fromNDArrayToMat(PyObject* o) {
	cv::Mat m;
	bool allowND = true;
	if (!PyArray_Check(o)) {
		failmsg("argument is not a numpy array");
		if (!m.data)
			m.allocator = &g_numpyAllocator;
	} else {
		PyArrayObject* oarr = (PyArrayObject*) o;

		bool needcopy = false, needcast = false;
		int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
		int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
					typenum == NPY_USHORT ? CV_16U :
					typenum == NPY_SHORT ? CV_16S :
					typenum == NPY_INT ? CV_32S :
					typenum == NPY_INT32 ? CV_32S :
					typenum == NPY_FLOAT ? CV_32F :
					typenum == NPY_DOUBLE ? CV_64F : -1;

		if (type < 0) {
			if (typenum == NPY_INT64 || typenum == NPY_UINT64
					|| typenum == NPY_LONG) {
				needcopy = needcast = true;
				new_typenum = NPY_INT;
				type = CV_32S;
			} else {
				failmsg("Argument data type is not supported");
				m.allocator = &g_numpyAllocator;
				return m;
			}
		}

#ifndef CV_MAX_DIM
		const int CV_MAX_DIM = 32;
#endif

		int ndims = PyArray_NDIM(oarr);
		if (ndims >= CV_MAX_DIM) {
			failmsg("Dimensionality of argument is too high");
			if (!m.data)
				m.allocator = &g_numpyAllocator;
			return m;
		}

		int size[CV_MAX_DIM + 1];
		size_t step[CV_MAX_DIM + 1];
		size_t elemsize = CV_ELEM_SIZE1(type);
		const npy_intp* _sizes = PyArray_DIMS(oarr);
		const npy_intp* _strides = PyArray_STRIDES(oarr);
		bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;

		for (int i = ndims - 1; i >= 0 && !needcopy; i--) {
			// these checks handle cases of
			//  a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
			//  b) transposed arrays, where _strides[] elements go in non-descending order
			//  c) flipped arrays, where some of _strides[] elements are negative
			if ((i == ndims - 1 && (size_t) _strides[i] != elemsize)
					|| (i < ndims - 1 && _strides[i] < _strides[i + 1]))
				needcopy = true;
		}

		if (ismultichannel && _strides[1] != (npy_intp) elemsize * _sizes[2])
			needcopy = true;

		if (needcopy) {

			if (needcast) {
				o = PyArray_Cast(oarr, new_typenum);
				oarr = (PyArrayObject*) o;
			} else {
				oarr = PyArray_GETCONTIGUOUS(oarr);
				o = (PyObject*) oarr;
			}

			_strides = PyArray_STRIDES(oarr);
		}

		for (int i = 0; i < ndims; i++) {
			size[i] = (int) _sizes[i];
			step[i] = (size_t) _strides[i];
		}

		// handle degenerate case
		if (ndims == 0) {
			size[ndims] = 1;
			step[ndims] = elemsize;
			ndims++;
		}

		if (ismultichannel) {
			ndims--;
			type |= CV_MAKETYPE(0, size[2]);
		}

		if (ndims > 2 && !allowND) {
			failmsg("%s has more than 2 dimensions");
		} else {

			m = Mat(ndims, size, type, PyArray_DATA(oarr), step);
			m.u = g_numpyAllocator.allocate(o, ndims, size, type, step);
			m.addref();

			if (!needcopy) {
				Py_INCREF(o);
			}
		}
		m.allocator = &g_numpyAllocator;
	}
	return m;
}
Example #10
CV_IMPL  void
cvLUT( const void* srcarr, void* dstarr, const void* lutarr )
{
    static CvFuncTable lut_c1_tab, lut_cn_tab;
    static CvLUT_TransformFunc lut_8u_tab[4];
    static int inittab = 0;

    CV_FUNCNAME( "cvLUT" );

    __BEGIN__;

    int  coi1 = 0, coi2 = 0;
    int  depth, cn, lut_cn;
    CvMat  srcstub, *src = (CvMat*)srcarr;
    CvMat  dststub, *dst = (CvMat*)dstarr;
    CvMat  lutstub, *lut = (CvMat*)lutarr;
    uchar* lut_data;
    uchar* shuffled_lut = 0;
    CvSize size;

    if( !inittab )
    {
        icvInitLUT_Transform8uC1RTable( &lut_c1_tab );
        icvInitLUT_Transform8uCnRTable( &lut_cn_tab );
        lut_8u_tab[0] = (CvLUT_TransformFunc)icvLUT_Transform8u_8u_C1R;
        lut_8u_tab[1] = (CvLUT_TransformFunc)icvLUT_Transform8u_8u_C2R;
        lut_8u_tab[2] = (CvLUT_TransformFunc)icvLUT_Transform8u_8u_C3R;
        lut_8u_tab[3] = (CvLUT_TransformFunc)icvLUT_Transform8u_8u_C4R;
        inittab = 1;
    }

    if( !CV_IS_MAT(src) )
        CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));

    if( !CV_IS_MAT(dst) )
        CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));

    if( !CV_IS_MAT(lut) )
        CV_CALL( lut = cvGetMat( lut, &lutstub ));

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( !CV_ARE_CNS_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( CV_MAT_DEPTH( src->type ) > CV_8S )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    depth = CV_MAT_DEPTH( dst->type );
    cn = CV_MAT_CN( dst->type );
    lut_cn = CV_MAT_CN( lut->type );

    if( !CV_IS_MAT_CONT(lut->type) || (lut_cn != 1 && lut_cn != cn) ||
        !CV_ARE_DEPTHS_EQ( dst, lut ) || lut->width*lut->height != 256 )
        CV_ERROR( CV_StsBadArg, "The LUT must be continuous array \n"
                                "with 256 elements of the same type as destination" );

    size = cvGetMatSize( src );
    if( lut_cn == 1 )
    {
        size.width *= cn;
        cn = 1;
    }

    if( CV_IS_MAT_CONT( src->type & dst->type ))
    {
        size.width *= size.height;
        size.height = 1;
    }

    lut_data = lut->data.ptr;

    if( CV_MAT_DEPTH( src->type ) == CV_8S )
    {
        int half_size = CV_ELEM_SIZE1(depth)*cn*128;
        // cvStackAlloc() is no longer available here, so the shuffled LUT is
        // allocated on the heap and released with cvFree() below.
        shuffled_lut = (uchar*)cvAlloc(half_size*2);

        // shuffle lut
        memcpy( shuffled_lut, lut_data + half_size, half_size );
        memcpy( shuffled_lut + half_size, lut_data, half_size );

        lut_data = shuffled_lut;
    }

    if( lut_cn == 1 || (lut_cn <= 4 && depth == CV_8U) )
    {
        CvLUT_TransformFunc func = depth == CV_8U ? lut_8u_tab[cn-1] :
            (CvLUT_TransformFunc)(lut_c1_tab.fn_2d[depth]);

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        IPPI_CALL( func( src->data.ptr, src->step, dst->data.ptr,
                         dst->step, size, lut_data ));
    }
    else
    {
        CvLUT_TransformCnFunc func =
            (CvLUT_TransformCnFunc)(lut_cn_tab.fn_2d[depth]);

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        IPPI_CALL( func( src->data.ptr, src->step, dst->data.ptr,
                         dst->step, size, lut_data, cn ));
    }
    if(shuffled_lut)
    {
        cvFree(&shuffled_lut);
    }
    __END__;
}
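A short usage sketch for the legacy C interface above (an illustration, not part of the original file): build a 256-entry lookup table and remap an 8-bit image through cvLUT().

#include <opencv2/core/core_c.h>

/* Invert an 8-bit image into a same-sized, same-type destination. */
void invert_8u(CvArr* src, CvArr* dst)
{
    uchar lut_data[256];
    CvMat lut;
    int i;

    for (i = 0; i < 256; i++)
        lut_data[i] = (uchar)(255 - i);

    lut = cvMat(1, 256, CV_8UC1, lut_data);
    cvLUT(src, dst, &lut);
}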
Example #11
bool numpy_to_mat(const PyObject* o, cv::Mat& m, const char* name, bool allowND)
{
    if( !o || o == Py_None )
    {
        if( !m.data )
            m.allocator = &g_numpyAllocator;
        return true;
    }

    if( !PyArray_Check(o) )
    {
        failmsg("%s is not a numpy array", name);
        return false;
    }

    int typenum = PyArray_TYPE(o);
    int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
               typenum == NPY_USHORT ? CV_16U : typenum == NPY_SHORT ? CV_16S :
               typenum == NPY_INT || typenum == NPY_LONG ? CV_32S :
               typenum == NPY_FLOAT ? CV_32F :
               typenum == NPY_DOUBLE ? CV_64F : -1;

    if( type < 0 )
    {
        failmsg("%s data type = %d is not supported", name, typenum);
        return false;
    }

    int ndims = PyArray_NDIM(o);
    if(ndims >= CV_MAX_DIM)
    {
        failmsg("%s dimensionality (=%d) is too high", name, ndims);
        return false;
    }

    int size[CV_MAX_DIM+1];
    size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type);
    const npy_intp* _sizes = PyArray_DIMS(o);
    const npy_intp* _strides = PyArray_STRIDES(o);

    for(int i = 0; i < ndims; i++)
    {
        size[i] = (int)_sizes[i];
        step[i] = (size_t)_strides[i];
    }

    if( ndims == 0 || step[ndims-1] > elemsize ) {
        size[ndims] = 1;
        step[ndims] = elemsize;
        ndims++;
    }

    if( ndims == 3 && size[2] <= CV_CN_MAX && step[1] == elemsize*size[2] )
    {
        ndims--;
        type |= CV_MAKETYPE(0, size[2]);
    }

    if( ndims > 2 && !allowND )
    {
        failmsg("%s has more than 2 dimensions", name);
        return false;
    }

    m = Mat(ndims, size, type, PyArray_DATA(o), step);

    if( m.data )
    {
        m.refcount = refcountFromPyObject(o);
        m.addref(); // protect the original numpy array from deallocation
                    // (since Mat destructor will decrement the reference counter)
    }
    m.allocator = &g_numpyAllocator;
    return true;
}
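Finally, a hedged sketch of how a hand-written extension function might call numpy_to_mat(); py_process is a hypothetical entry point, and failmsg() is assumed to set the Python error state, as it does in the OpenCV bindings:

#include <Python.h>
#include <opencv2/core.hpp>

// Defined in the translation unit above.
bool numpy_to_mat(const PyObject* o, cv::Mat& m, const char* name, bool allowND);

static PyObject* py_process(PyObject*, PyObject* args)
{
    PyObject* arr = NULL;
    if (!PyArg_ParseTuple(args, "O", &arr))
        return NULL;

    cv::Mat img;
    if (!numpy_to_mat(arr, img, "img", true))
        return NULL;  // failmsg() is assumed to have raised already

    // ... operate on img, which wraps the NumPy buffer without copying ...
    Py_RETURN_NONE;
}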