// Fill the whole matrix (arbitrary dimensionality) with the scalar `s`.
// Iterates over the maximal continuous "planes" using NAryMatIterator.
Mat& Mat::operator = (const Scalar& s)
{
    const Mat* arrays[] = { this };
    uchar* ptr;
    NAryMatIterator it(arrays, &ptr, 1);
    // Number of bytes in one continuous plane.
    size_t size = it.size*elemSize();

    if( s[0] == 0 && s[1] == 0 && s[2] == 0 && s[3] == 0 )
    {
        // All-zero scalar: a plain memset of each plane is correct for
        // every element type and is the fastest path.
        for( size_t i = 0; i < it.nplanes; i++, ++it )
            memset( ptr, 0, size );
    }
    else
    {
        if( it.nplanes > 0 )
        {
            double scalar[12];
            // Expand the scalar into a raw-byte pattern; 12 is divisible by
            // 1, 2, 3 and 4, so the pattern tiles evenly for any channel
            // count and can be copied in large chunks.
            scalarToRawData(s, scalar, type(), 12);
            size_t blockSize = 12*elemSize1();

            // Tile the raw pattern across the first plane; the last chunk
            // may be partial (sz < blockSize).
            for( size_t j = 0; j < size; j += blockSize )
            {
                size_t sz = MIN(blockSize, size - j);
                memcpy( ptr + j, scalar, sz );
            }
        }

        // Replicate the already-filled first plane into every remaining
        // plane. NOTE(review): this assumes the first plane begins at
        // `data` — holds for NAryMatIterator over this single array.
        for( size_t i = 1; i < it.nplanes; i++ )
        {
            ++it;
            memcpy( ptr, data, size );
        }
    }
    return *this;
}
// Return a new header reinterpreting the same data with a different number
// of channels and/or rows. No data is copied; only flags/step/size change.
// new_cn == 0 means "keep the current channel count";
// new_rows == 0 means "keep / derive the row count".
UMat UMat::reshape(int new_cn, int new_rows) const
{
    int cn = channels();
    UMat hdr = *this;

    // n-dimensional case: only the channel count of the last dimension may
    // change, it must divide the last dimension's total width, and the row
    // count cannot be altered (new_rows must be 0).
    if( dims > 2 && new_rows == 0 && new_cn != 0 && size[dims-1]*cn % new_cn == 0 )
    {
        hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
        hdr.step[dims-1] = CV_ELEM_SIZE(hdr.flags);
        hdr.size[dims-1] = hdr.size[dims-1]*cn / new_cn;
        return hdr;
    }

    CV_Assert( dims <= 2 );

    if( new_cn == 0 )
        new_cn = cn;    // 0 == keep the current number of channels

    // Total row width measured in single-channel elements.
    int total_width = cols * cn;

    // If the caller did not request a row count and the new channel count
    // does not evenly divide the current row width, derive the row count
    // that keeps the total element count constant.
    if( (new_cn > total_width || total_width % new_cn != 0) && new_rows == 0 )
        new_rows = rows * total_width / new_cn;

    if( new_rows != 0 && new_rows != rows )
    {
        int total_size = total_width * rows;

        // Changing the number of rows requires one contiguous buffer.
        if( !isContinuous() )
            CV_Error( CV_BadStep,
            "The matrix is not continuous, thus its number of rows can not be changed" );

        if( (unsigned)new_rows > (unsigned)total_size )
            CV_Error( CV_StsOutOfRange, "Bad new number of rows" );

        total_width = total_size / new_rows;

        if( total_width * new_rows != total_size )
            CV_Error( CV_StsBadArg, "The total number of matrix elements "
                                    "is not divisible by the new number of rows" );

        hdr.rows = new_rows;
        hdr.step[0] = total_width * elemSize1();
    }

    int new_width = total_width / new_cn;

    if( new_width * new_cn != total_width )
        CV_Error( CV_BadNumChannels,
        "The total width is not divisible by the new number of channels" );

    hdr.cols = new_width;
    hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
    hdr.step[1] = CV_ELEM_SIZE(hdr.flags);
    return hdr;
}
// Fill the (2-dimensional) matrix with the scalar `s`.
Mat& Mat::operator = (const Scalar& s)
{
    Size sz = size();
    uchar* dst = data;

    sz.width *= (int)elemSize();   // from here on, width is measured in bytes
    if( isContinuous() )
    {
        // Continuous storage: treat the whole matrix as one long row.
        sz.width *= sz.height;
        sz.height = 1;
    }

    if( s[0] == 0 && s[1] == 0 && s[2] == 0 && s[3] == 0 )
    {
        // All-zero scalar: memset every row (correct for any element type).
        for( ; sz.height--; dst += step )
            memset( dst, 0, sz.width );
    }
    else
    {
        int t = type(), esz1 = (int)elemSize1();
        double scalar[12];
        // 12 is divisible by 1, 2, 3 and 4 channels, so the expanded raw
        // pattern tiles evenly and can be copied in whole chunks.
        scalarToRawData(s, scalar, t, 12);
        int copy_len = 12*esz1;
        uchar* dst_limit = dst + sz.width;

        if( sz.height-- )
        {
            // Tile the raw pattern across the first row in full chunks...
            while( dst + copy_len <= dst_limit )
            {
                memcpy( dst, scalar, copy_len );
                dst += copy_len;
            }
            // ...then finish the row with the partial tail chunk.
            memcpy( dst, scalar, dst_limit - dst );
        }

        if( sz.height )
        {
            // Remaining rows are byte-wise copies of the first row, which
            // starts at `data`. Rewind to the start of the second row.
            dst = dst_limit - sz.width + step;
            for( ; sz.height--; dst += step )
                memcpy( dst, data, sz.width );
        }
    }
    return *this;
}
// Copy elements of *this into _dst, but only where `mask` is non-zero.
// An empty mask degenerates to a plain (unmasked) copy.
void Mat::copyTo( OutputArray _dst, InputArray _mask ) const
{
    Mat mask = _mask.getMat();
    if( !mask.data )
    {
        // No mask supplied: delegate to the unconditional copy.
        copyTo(_dst);
        return;
    }

    int cn = channels(), mcn = mask.channels();
    // The mask must be 8-bit and either single-channel (one flag per pixel)
    // or have one channel per source channel.
    CV_Assert( mask.depth() == CV_8U && (mcn == 1 || mcn == cn) );
    bool colorMask = mcn > 1;

    // Multi-channel mask copies channel-by-channel (elemSize1); a
    // single-channel mask copies whole elements (elemSize).
    size_t esz = colorMask ? elemSize1() : elemSize();
    BinaryFunc copymask = getCopyMaskFunc(esz);

    // Remember where the destination buffer was before create(); if
    // create() had to (re)allocate, the new memory is uninitialized and the
    // masked-out elements must be given a defined value (zero).
    uchar* data0 = _dst.getMat().data;
    _dst.create( dims, size, type() );
    Mat dst = _dst.getMat();

    if( dst.data != data0 ) // do not leave dst uninitialized
        dst = Scalar(0);

    if( dims <= 2 )
    {
        CV_Assert( size() == mask.size() );
        // Collapse to the largest continuous 2-D span shared by src/dst/mask.
        Size sz = getContinuousSize(*this, dst, mask, mcn);
        copymask(data, step, mask.data, mask.step, dst.data, dst.step, sz, &esz);
        return;
    }

    // n-dimensional case: iterate the continuous planes of src, dst and
    // mask in lock-step.
    const Mat* arrays[] = { this, &dst, &mask, 0 };
    uchar* ptrs[3];
    NAryMatIterator it(arrays, ptrs);
    Size sz((int)(it.size*mcn), 1);

    for( size_t i = 0; i < it.nplanes; i++, ++it )
        copymask(ptrs[0], 0, ptrs[2], 0, ptrs[1], 0, sz, &esz);
}
//! Returns the row step measured in elements of a single channel.
inline size_t HostMem::step1() const
{
    const size_t channelSize = elemSize1();
    return step / channelSize;
}
//! Row step expressed in single-channel elements rather than bytes.
inline size_t GpuMat::step1() const
{
    const size_t bytesPerRow = step;
    return bytesPerRow / elemSize1();
}
// Row step measured in single-channel elements.
// NOTE(review): the enclosing `template <typename T>` header is not visible
// in this chunk — confirm it immediately precedes this definition.
__host__ size_t GpuMat_<T>::step1() const { return step / elemSize1(); }
//! Returns the per-row step in terms of single-channel elements.
inline size_t CudaMem::step1() const
{
    const size_t esz = elemSize1();
    const size_t stepInBytes = step;
    return stepInBytes / esz;
}