/// @brief Construct a Mat from an NDArray object. void matFromNDArrayBoostConverter::construct(PyObject* object, boost::python::converter::rvalue_from_python_stage1_data* data) { namespace python = boost::python; // Object is a borrowed reference, so create a handle indicting it is // borrowed for proper reference counting. python::handle<> handle(python::borrowed(object)); // Obtain a handle to the memory block that the converter has allocated // for the C++ type. typedef python::converter::rvalue_from_python_storage<Mat> storage_type; void* storage = reinterpret_cast<storage_type*>(data)->storage.bytes; // Allocate the C++ type into the converter's memory block, and assign // its handle to the converter's convertible variable. The C++ // container is populated by passing the begin and end iterators of // the python object to the container's constructor. PyArrayObject* oarr = (PyArrayObject*) object; bool needcopy = false, needcast = false; int typenum = PyArray_TYPE(oarr), new_typenum = typenum; int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S : typenum == NPY_USHORT ? CV_16U : typenum == NPY_SHORT ? CV_16S : typenum == NPY_INT ? CV_32S : typenum == NPY_INT32 ? CV_32S : typenum == NPY_FLOAT ? CV_32F : typenum == NPY_DOUBLE ? 
CV_64F : -1; if (type < 0) { needcopy = needcast = true; new_typenum = NPY_INT; type = CV_32S; } #ifndef CV_MAX_DIM const int CV_MAX_DIM = 32; #endif int ndims = PyArray_NDIM(oarr); int size[CV_MAX_DIM + 1]; size_t step[CV_MAX_DIM + 1]; size_t elemsize = CV_ELEM_SIZE1(type); const npy_intp* _sizes = PyArray_DIMS(oarr); const npy_intp* _strides = PyArray_STRIDES(oarr); bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX; for (int i = ndims - 1; i >= 0 && !needcopy; i--) { // these checks handle cases of // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases // b) transposed arrays, where _strides[] elements go in non-descending order // c) flipped arrays, where some of _strides[] elements are negative if ((i == ndims - 1 && (size_t) _strides[i] != elemsize) || (i < ndims - 1 && _strides[i] < _strides[i + 1])) needcopy = true; } if (ismultichannel && _strides[1] != (npy_intp) elemsize * _sizes[2]) needcopy = true; if (needcopy) { if (needcast) { object = PyArray_Cast(oarr, new_typenum); oarr = (PyArrayObject*) object; } else { oarr = PyArray_GETCONTIGUOUS(oarr); object = (PyObject*) oarr; } _strides = PyArray_STRIDES(oarr); } for (int i = 0; i < ndims; i++) { size[i] = (int) _sizes[i]; step[i] = (size_t) _strides[i]; } // handle degenerate case if (ndims == 0) { size[ndims] = 1; step[ndims] = elemsize; ndims++; } if (ismultichannel) { ndims--; type |= CV_MAKETYPE(0, size[2]); } if (!needcopy) { Py_INCREF(object); } cv::Mat* m = new (storage) cv::Mat(ndims, size, type, PyArray_DATA(oarr), step); m->u = g_numpyAllocator.allocate(object, ndims, size, type, step); m->allocator = &g_numpyAllocator; m->addref(); data->convertible = storage; }
// special case, when the convertor needs full ArgInfo structure static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info) { bool allowND = true; if(!o || o == Py_None) { if( !m.data ) m.allocator = &g_numpyAllocator; return true; } if( PyInt_Check(o) ) { double v[] = {(double)PyInt_AsLong((PyObject*)o), 0., 0., 0.}; m = Mat(4, 1, CV_64F, v).clone(); return true; } if( PyFloat_Check(o) ) { double v[] = {PyFloat_AsDouble((PyObject*)o), 0., 0., 0.}; m = Mat(4, 1, CV_64F, v).clone(); return true; } if( PyTuple_Check(o) ) { int i, sz = (int)PyTuple_Size((PyObject*)o); m = Mat(sz, 1, CV_64F); for( i = 0; i < sz; i++ ) { PyObject* oi = PyTuple_GET_ITEM(o, i); if( PyInt_Check(oi) ) m.at<double>(i) = (double)PyInt_AsLong(oi); else if( PyFloat_Check(oi) ) m.at<double>(i) = (double)PyFloat_AsDouble(oi); else { failmsg("%s is not a numerical tuple", info.name); m.release(); return false; } } return true; } if( !PyArray_Check(o) ) { failmsg("%s is not a numpy array, neither a scalar", info.name); return false; } PyArrayObject* oarr = (PyArrayObject*) o; bool needcopy = false, needcast = false; int typenum = PyArray_TYPE(oarr), new_typenum = typenum; int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S : typenum == NPY_USHORT ? CV_16U : typenum == NPY_SHORT ? CV_16S : typenum == NPY_INT ? CV_32S : typenum == NPY_INT32 ? CV_32S : typenum == NPY_FLOAT ? CV_32F : typenum == NPY_DOUBLE ? 
CV_64F : -1; if( type < 0 ) { if( typenum == NPY_INT64 || typenum == NPY_UINT64 || type == NPY_LONG ) { needcopy = needcast = true; new_typenum = NPY_INT; type = CV_32S; } else { failmsg("%s data type = %d is not supported", info.name, typenum); return false; } } #ifndef CV_MAX_DIM const int CV_MAX_DIM = 32; #endif int ndims = PyArray_NDIM(oarr); if(ndims >= CV_MAX_DIM) { failmsg("%s dimensionality (=%d) is too high", info.name, ndims); return false; } int size[CV_MAX_DIM+1]; size_t step[CV_MAX_DIM+1]; size_t elemsize = CV_ELEM_SIZE1(type); const npy_intp* _sizes = PyArray_DIMS(oarr); const npy_intp* _strides = PyArray_STRIDES(oarr); bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX; for( int i = ndims-1; i >= 0 && !needcopy; i-- ) { // these checks handle cases of // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases // b) transposed arrays, where _strides[] elements go in non-descending order // c) flipped arrays, where some of _strides[] elements are negative if( (i == ndims-1 && (size_t)_strides[i] != elemsize) || (i < ndims-1 && _strides[i] < _strides[i+1]) ) needcopy = true; } if( ismultichannel && _strides[1] != (npy_intp)elemsize*_sizes[2] ) needcopy = true; if (needcopy) { if (info.outputarg) { failmsg("Layout of the output array %s is incompatible with cv::Mat (step[ndims-1] != elemsize or step[1] != elemsize*nchannels)", info.name); return false; } if( needcast ) { o = PyArray_Cast(oarr, new_typenum); oarr = (PyArrayObject*) o; } else { oarr = PyArray_GETCONTIGUOUS(oarr); o = (PyObject*) oarr; } _strides = PyArray_STRIDES(oarr); } for(int i = 0; i < ndims; i++) { size[i] = (int)_sizes[i]; step[i] = (size_t)_strides[i]; } // handle degenerate case if( ndims == 0) { size[ndims] = 1; step[ndims] = elemsize; ndims++; } if( ismultichannel ) { ndims--; type |= CV_MAKETYPE(0, size[2]); } if( ndims > 2 && !allowND ) { failmsg("%s has more than 2 dimensions", info.name); return false; } m = Mat(ndims, size, type, 
PyArray_DATA(oarr), step); m.u = g_numpyAllocator.allocate(o, ndims, size, type, step); m.addref(); if( !needcopy ) { Py_INCREF(o); } m.allocator = &g_numpyAllocator; return true; }
/// @brief Convert a NumPy ndarray into a cv::Mat.
///
/// Unlike pyopencv_to(), failure is reported through failmsg() and an empty
/// Mat (with g_numpyAllocator set) is returned instead of a bool. Supported
/// dtypes map directly to OpenCV depths; 64-bit / long integer arrays are
/// downcast to int32. Non-contiguous / transposed / flipped layouts are
/// copied to a contiguous buffer before wrapping.
///
/// @param o Source Python object; must be a numpy array.
/// @return A Mat sharing (or owning a copy of) the ndarray's data, or an
///         empty Mat on error.
Mat fromNDArrayToMat(PyObject* o)
{
    cv::Mat m;
    bool allowND = true;
    if (!PyArray_Check(o))
    {
        failmsg("argument is not a numpy array");
        if (!m.data)
            m.allocator = &g_numpyAllocator;
    }
    else
    {
        PyArrayObject* oarr = (PyArrayObject*) o;

        bool needcopy = false, needcast = false;
        int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
        // Map the NumPy dtype to the corresponding OpenCV depth; -1 = unsupported.
        int type = typenum == NPY_UBYTE  ? CV_8U  :
                   typenum == NPY_BYTE   ? CV_8S  :
                   typenum == NPY_USHORT ? CV_16U :
                   typenum == NPY_SHORT  ? CV_16S :
                   typenum == NPY_INT    ? CV_32S :
                   typenum == NPY_INT32  ? CV_32S :
                   typenum == NPY_FLOAT  ? CV_32F :
                   typenum == NPY_DOUBLE ? CV_64F : -1;

        if (type < 0)
        {
            // BUGFIX: was `type == NPY_LONG`; `type` is the OpenCV depth
            // (-1 here), so that comparison could never be true. Compare
            // the NumPy typenum so long-integer arrays are downcast to
            // int32 instead of rejected (matches upstream OpenCV).
            if (typenum == NPY_INT64 || typenum == NPY_UINT64 || typenum == NPY_LONG)
            {
                needcopy = needcast = true;
                new_typenum = NPY_INT;
                type = CV_32S;
            }
            else
            {
                failmsg("Argument data type is not supported");
                m.allocator = &g_numpyAllocator;
                return m;
            }
        }

#ifndef CV_MAX_DIM
        const int CV_MAX_DIM = 32;
#endif

        int ndims = PyArray_NDIM(oarr);
        // Guard the fixed-size size[]/step[] buffers below.
        if (ndims >= CV_MAX_DIM)
        {
            failmsg("Dimensionality of argument is too high");
            if (!m.data)
                m.allocator = &g_numpyAllocator;
            return m;
        }

        int size[CV_MAX_DIM + 1];
        size_t step[CV_MAX_DIM + 1];
        size_t elemsize = CV_ELEM_SIZE1(type);
        const npy_intp* _sizes = PyArray_DIMS(oarr);
        const npy_intp* _strides = PyArray_STRIDES(oarr);
        // A 3-D array whose last extent fits in CV_CN_MAX is treated as a
        // 2-D multi-channel image rather than a true 3-D Mat.
        bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;

        for (int i = ndims - 1; i >= 0 && !needcopy; i--)
        {
            // these checks handle cases of
            // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
            // b) transposed arrays, where _strides[] elements go in non-descending order
            // c) flipped arrays, where some of _strides[] elements are negative
            if ((i == ndims - 1 && (size_t) _strides[i] != elemsize)
                    || (i < ndims - 1 && _strides[i] < _strides[i + 1]))
                needcopy = true;
        }

        // Channels must be packed contiguously for the multi-channel view.
        if (ismultichannel && _strides[1] != (npy_intp) elemsize * _sizes[2])
            needcopy = true;

        if (needcopy)
        {
            if (needcast)
            {
                // PyArray_Cast returns a new reference; we keep it.
                o = PyArray_Cast(oarr, new_typenum);
                oarr = (PyArrayObject*) o;
            }
            else
            {
                // PyArray_GETCONTIGUOUS also returns a new reference.
                oarr = PyArray_GETCONTIGUOUS(oarr);
                o = (PyObject*) oarr;
            }
            _strides = PyArray_STRIDES(oarr);
        }

        for (int i = 0; i < ndims; i++)
        {
            size[i] = (int) _sizes[i];
            step[i] = (size_t) _strides[i];
        }

        // handle degenerate case: 0-d array becomes a 1x1 Mat
        if (ndims == 0)
        {
            size[ndims] = 1;
            step[ndims] = elemsize;
            ndims++;
        }

        if (ismultichannel)
        {
            // Fold the last dimension into the Mat's channel count.
            ndims--;
            type |= CV_MAKETYPE(0, size[2]);
        }

        if (ndims > 2 && !allowND)
        {
            // BUGFIX: the message previously contained a dangling "%s"
            // conversion with no matching argument, which is undefined
            // behavior in the printf-style failmsg(). Use a plain message.
            failmsg("argument has more than 2 dimensions");
        }
        else
        {
            m = Mat(ndims, size, type, PyArray_DATA(oarr), step);
            m.u = g_numpyAllocator.allocate(o, ndims, size, type, step);
            m.addref();

            if (!needcopy)
            {
                // Borrowed reference becomes owned by the allocator's UMatData.
                Py_INCREF(o);
            }
        }
        m.allocator = &g_numpyAllocator;
    }
    return m;
}