template <typename V>
host_image2d<V>::host_image2d(cv::Mat m)
{
  assert(m.rows > 0 && m.cols > 0);
  // The by-value copy `m` shares its reference counter with the caller's
  // Mat, so addref() keeps the pixel buffer alive after both headers die.
  m.addref();
  pitch_ = m.step;
  // dummy_free: the smart pointer must not free memory owned by OpenCV.
  data_ = PT((V*) m.data, dummy_free<V>);
  begin_ = (V*) m.data;
  domain_ = domain_type(m.rows, m.cols);
  // *this = static_cast<IplImage*>(&m);
}
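// A minimal standalone sketch (illustrative, not part of the class above)
// of why calling addref() on the by-value parameter works: the copy shares
// its reference counter with the caller's Mat, so the bumped count keeps
// the buffer valid after every Mat header has gone out of scope.
#include <opencv2/core.hpp>
#include <cassert>

static const unsigned char* wrap(cv::Mat m)   // by-value copy, as above
{
    m.addref();       // shared refcount +1: buffer now outlives all headers
    return m.data;
}

int main()
{
    const unsigned char* p;
    {
        cv::Mat img(4, 4, CV_8UC1, cv::Scalar(7));
        p = wrap(img);
    }                  // img and the copy are destroyed; refcount is still 1
    assert(p[0] == 7); // buffer intentionally kept alive (it leaks unless a
                       // matching release happens elsewhere, e.g. if a real
                       // releasing deleter replaced dummy_free)
    return 0;
}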
TensorWrapper::TensorWrapper(cv::Mat & mat)
{
    if (mat.empty()) {
        this->tensorPtr = nullptr;
        return;
    }

    this->typeCode = static_cast<char>(mat.depth());

    THByteTensor *outputPtr = new THByteTensor;

    // Build new storage on top of the Mat
    outputPtr->storage = THByteStorage_newWithData(
            mat.data,
            mat.step[0] * mat.rows);

    int sizeMultiplier;
    if (mat.channels() == 1) {
        outputPtr->nDimension = mat.dims;
        sizeMultiplier = cv::getElemSize(mat.depth());
    } else {
        outputPtr->nDimension = mat.dims + 1;
        sizeMultiplier = mat.elemSize1();
    }

    outputPtr->size   = static_cast<long *>(THAlloc(sizeof(long) * outputPtr->nDimension));
    outputPtr->stride = static_cast<long *>(THAlloc(sizeof(long) * outputPtr->nDimension));

    if (mat.channels() > 1) {
        outputPtr->size[outputPtr->nDimension - 1] = mat.channels();
        outputPtr->stride[outputPtr->nDimension - 1] = 1; //cv::getElemSize(returnValue.typeCode);
    }

    for (int i = 0; i < mat.dims; ++i) {
        outputPtr->size[i] = mat.size[i];
        outputPtr->stride[i] = mat.step[i] / sizeMultiplier;
    }

    // Prevent OpenCV from deallocating Mat data
    mat.addref();

    outputPtr->refcount = 0;

    this->tensorPtr = outputPtr;
}
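// Illustrative check (an assumed example, not library code): for a 480x640
// CV_8UC3 Mat, the size/stride mapping above yields a 3-D tensor of size
// {480, 640, 3} with strides {1920, 3, 1}, measured in elements, not bytes.
#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    cv::Mat mat(480, 640, CV_8UC3);
    const int n = mat.dims + 1;              // +1: channel axis becomes a dim
    long size[3], stride[3];
    long mult = (long)mat.elemSize1();       // bytes per scalar element (1)

    size[n - 1]   = mat.channels();          // innermost dim = channels
    stride[n - 1] = 1;
    for (int i = 0; i < mat.dims; ++i) {
        size[i]   = mat.size[i];
        stride[i] = (long)(mat.step[i] / mult);
    }
    std::printf("size = {%ld, %ld, %ld}, stride = {%ld, %ld, %ld}\n",
                size[0], size[1], size[2], stride[0], stride[1], stride[2]);
    // Prints: size = {480, 640, 3}, stride = {1920, 3, 1}
    return 0;
}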
ULONG AddRef()
{
    mat.addref();
    return mat.u->refcount;
}
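// A matching Release() for the COM-style AddRef() above might look like this
// sketch (assuming OpenCV >= 3, where the counter lives in Mat::u->refcount;
// reading the count before release() is only an approximation of the value
// a true COM Release() would return):
ULONG Release()
{
    ULONG remaining = mat.u ? (ULONG)(mat.u->refcount - 1) : 0;
    mat.release();   // decrements the shared counter, frees the data at zero
    return remaining;
}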
bool numpy_to_mat(const PyObject* o, cv::Mat& m, const char* name, bool allowND)
{
    if( !o || o == Py_None )
    {
        if( !m.data )
            m.allocator = &g_numpyAllocator;
        return true;
    }

    if( !PyArray_Check(o) )
    {
        failmsg("%s is not a numpy array", name);
        return false;
    }

    int typenum = PyArray_TYPE(o);
    int type = typenum == NPY_UBYTE ? CV_8U :
               typenum == NPY_BYTE ? CV_8S :
               typenum == NPY_USHORT ? CV_16U :
               typenum == NPY_SHORT ? CV_16S :
               typenum == NPY_INT || typenum == NPY_LONG ? CV_32S :
               typenum == NPY_FLOAT ? CV_32F :
               typenum == NPY_DOUBLE ? CV_64F : -1;

    if( type < 0 )
    {
        failmsg("%s data type = %d is not supported", name, typenum);
        return false;
    }

    int ndims = PyArray_NDIM(o);
    if( ndims >= CV_MAX_DIM )
    {
        failmsg("%s dimensionality (=%d) is too high", name, ndims);
        return false;
    }

    int size[CV_MAX_DIM+1];
    size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type);
    const npy_intp* _sizes = PyArray_DIMS(o);
    const npy_intp* _strides = PyArray_STRIDES(o);

    for( int i = 0; i < ndims; i++ )
    {
        size[i] = (int)_sizes[i];
        step[i] = (size_t)_strides[i];
    }

    // Normalize: append a unit dimension for 0-d arrays or when the last
    // stride is wider than one element.
    if( ndims == 0 || step[ndims-1] > elemsize )
    {
        size[ndims] = 1;
        step[ndims] = elemsize;
        ndims++;
    }

    // Fold a tightly packed last axis into Mat channels (HxWxC -> HxW with C channels).
    if( ndims == 3 && size[2] <= CV_CN_MAX && step[1] == elemsize*size[2] )
    {
        ndims--;
        type |= CV_MAKETYPE(0, size[2]);
    }

    if( ndims > 2 && !allowND )
    {
        failmsg("%s has more than 2 dimensions", name);
        return false;
    }

    m = cv::Mat(ndims, size, type, PyArray_DATA(o), step);

    if( m.data )
    {
        // OpenCV 2.x API: Mat::refcount points at the shared counter.
        // Aliasing it to the numpy object's counter and calling addref()
        // protects the original numpy array from deallocation
        // (since the Mat destructor will decrement the reference counter).
        m.refcount = refcountFromPyObject(o);
        m.addref();
    }
    m.allocator = &g_numpyAllocator;
    return true;
}
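// A minimal standalone sketch (no Python required; values are illustrative)
// of the channel fold numpy_to_mat performs: a tightly packed 3-D interleaved
// buffer becomes a 2-D multi-channel Mat over the same memory, zero-copy.
#include <opencv2/core.hpp>
#include <cassert>
#include <vector>

int main()
{
    std::vector<unsigned char> buf(480 * 640 * 3);
    int    size[] = {480, 640, 3};
    size_t step[] = {640 * 3, 3, 1};           // strides in bytes
    int type = CV_8U, ndims = 3;
    size_t elemsize = 1;                       // CV_ELEM_SIZE1(CV_8U)

    // Same test as in numpy_to_mat: the last axis is packed channel-wise.
    if (ndims == 3 && size[2] <= CV_CN_MAX && step[1] == elemsize * size[2]) {
        ndims--;                               // drop the channel axis...
        type |= CV_MAKETYPE(0, size[2]);       // ...and fold it into the type
    }

    cv::Mat m(ndims, size, type, buf.data(), step);
    assert(m.dims == 2 && m.channels() == 3 && m.data == buf.data());
    return 0;
}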