void myConvexityDefects( InputArray _points, InputArray _hull, OutputArray _defects )
{
    // Converts the CvConvexityDefect records produced by convexityDefects()
    // into a CV_32SC4 matrix of (start_idx, end_idx, farthest_point_idx,
    // fixed-point depth) rows, one row per defect.
    Mat points = _points.getMat();
    // Contour must be integer 2D points, and more than 3 of them.
    int ptnum = points.checkVector(2, CV_32S);
    CV_Assert( ptnum > 3 );
    Mat hull = _hull.getMat();
    // Hull must be given as at least 3 integer indices.
    CV_Assert( hull.checkVector(1, CV_32S) > 2 );
    std::vector<CvConvexityDefect> seq;
    convexityDefects(_points, _hull, seq);

    if( seq.size() == 0 )
    {
        // No defects: contour is convex along its hull; clear the output.
        _defects.release();
        return;
    }

    _defects.create(seq.size(), 1, CV_32SC4);
    Mat defects = _defects.getMat();

    auto it = seq.begin();
    // The defect records hold raw pointers into the contour data; recover
    // the point indices by pointer arithmetic against the contour base.
    CvPoint* ptorg = (CvPoint*)points.data;
    for( unsigned i = 0; i < seq.size(); ++i, ++it )
    {
        CvConvexityDefect& d = *it;
        int idx0 = (int)(d.start - ptorg);
        int idx1 = (int)(d.end - ptorg);
        int idx2 = (int)(d.depth_point - ptorg);
        // All recovered indices must point inside the contour.
        CV_Assert( 0 <= idx0 && idx0 < ptnum );
        CV_Assert( 0 <= idx1 && idx1 < ptnum );
        CV_Assert( 0 <= idx2 && idx2 < ptnum );
        CV_Assert( d.depth >= 0 );
        // Store the depth in 8.8 fixed point (multiply by 256 and round),
        // matching the encoding used by convexityDefects().
        int idepth = cvRound(d.depth*256);
        defects.at<Vec4i>(i) = Vec4i(idx0, idx1, idx2, idepth);
    }
}
void BTVL1::processImpl(Ptr<FrameSource>& frameSource, OutputArray _output)
{
    // Emits one super-resolved output frame per call, reading and processing
    // input frames lazily as the output cursor advances.
    if (outPos_ >= storePos_)
    {
        // Every stored frame has already been emitted: nothing to output.
        _output.release();
        return;
    }

    readNextFrame(frameSource);

    // Process the next pending frame, if one is available, before advancing
    // the output position.
    if (procPos_ < storePos_)
    {
        ++procPos_;
        processFrame(procPos_);
    }

    ++outPos_;

    // OpenCL fast path; taken only when the pipeline works on UMats.
    CV_OCL_RUN(isUmat_, ocl_processImpl(frameSource, _output))

    const Mat& curOutput = at(outPos_, outputs_);

    if (_output.kind() < _InputArray::OPENGL_BUFFER || _output.isUMat())
        curOutput.convertTo(_output, CV_8U);
    else
    {
        // OpenGL-backed outputs cannot be written by convertTo directly:
        // convert into an intermediate Mat, then copy across.
        curOutput.convertTo(finalOutput_, CV_8U);
        arrCopy(finalOutput_, _output);
    }
}
void cv::hconcat(const Mat* src, size_t nsrc, OutputArray _dst) { CV_INSTRUMENT_REGION(); if( nsrc == 0 || !src ) { _dst.release(); return; } int totalCols = 0, cols = 0; for( size_t i = 0; i < nsrc; i++ ) { CV_Assert( src[i].dims <= 2 && src[i].rows == src[0].rows && src[i].type() == src[0].type()); totalCols += src[i].cols; } _dst.create( src[0].rows, totalCols, src[0].type()); Mat dst = _dst.getMat(); for( size_t i = 0; i < nsrc; i++ ) { Mat dpart = dst(Rect(cols, 0, src[i].cols, src[i].rows)); src[i].copyTo(dpart); cols += src[i].cols; } }
bool VideoCapture::read(OutputArray image)
{
    // Convenience wrapper: grab the next frame and decode it in one call.
    // On a failed grab the output is cleared so no stale frame leaks out.
    const bool grabbed = grab();
    if (!grabbed)
    {
        image.release();
        return false;
    }
    retrieve(image);
    return !image.empty();
}
bool VideoCapture::read(OutputArray image)
{
    // Grab-then-retrieve in a single step; success means a non-empty frame.
    if (!grab())
    {
        // Grab failed: make sure the caller does not see a stale frame.
        image.release();
        return false;
    }
    retrieve(image);
    return !image.empty();
}
/* dst = src */
void Mat::copyTo( OutputArray _dst ) const
{
    // Deep-copies this matrix into _dst, (re)allocating the destination.
    // If the destination has a fixed element type that differs from ours,
    // delegate to convertTo (only the depth may differ, not the channels).
    int dtype = _dst.type();
    if( _dst.fixedType() && dtype != type() )
    {
        CV_Assert( channels() == CV_MAT_CN(dtype) );
        convertTo( _dst, dtype );
        return;
    }

    if( empty() )
    {
        // Copying an empty matrix clears the destination.
        _dst.release();
        return;
    }

    if( dims <= 2 )
    {
        _dst.create( rows, cols, type() );
        Mat dst = _dst.getMat();
        if( data == dst.data )
            return;  // same underlying buffer: copy would be a no-op

        if( rows > 0 && cols > 0 )
        {
            const uchar* sptr = data;
            uchar* dptr = dst.data;

            // to handle the copying 1xn matrix => nx1 std vector.
            Size sz = size() == dst.size() ?
                getContinuousSize(*this, dst) :
                getContinuousSize(*this);
            size_t len = sz.width*elemSize();

            // Row-wise memcpy; getContinuousSize collapses the rows into a
            // single long row when both matrices are continuous.
            for( ; sz.height--; sptr += step, dptr += dst.step )
                memcpy( dptr, sptr, len );
        }
        return;
    }

    // Multi-dimensional case: copy plane by plane via NAryMatIterator.
    _dst.create( dims, size, type() );
    Mat dst = _dst.getMat();
    if( data == dst.data )
        return;

    if( total() != 0 )
    {
        const Mat* arrays[] = { this, &dst };
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs, 2);
        size_t sz = it.size*elemSize();

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            memcpy(ptrs[1], ptrs[0], sz);
    }
}
bool VideoCapture::read(OutputArray image)
{
    CV_INSTRUMENT_REGION();

    // Combined grab + retrieve; returns true only when a frame was decoded.
    const bool haveFrame = grab();
    if (haveFrame)
        retrieve(image);
    else
        image.release();  // never hand back a stale frame on failure
    return !image.empty();
}
void DescriptorExtractor::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
{
    // Nothing to describe: clear the output and return.
    if( keypoints.empty() || image.empty() )
    {
        descriptors.release();
        return;
    }

    // Discard keypoints too close to the image border, then those with a
    // (near-)zero size, before delegating to the concrete extractor.
    KeyPointsFilter::runByImageBorder( keypoints, image.size(), 0 );
    KeyPointsFilter::runByKeypointSize( keypoints, std::numeric_limits<float>::epsilon() );

    computeImpl( image, keypoints, descriptors );
}
/*
 * Compute the descriptors for a set of keypoints in an image.
 * image       The image.
 * keypoints   The input keypoints. Keypoints for which a descriptor cannot be computed are removed.
 * descriptors Computed descriptors. Row i is the descriptor for keypoint i.
 */
void Feature2D::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors )
{
    CV_INSTRUMENT_REGION();

    // An empty image yields no descriptors.
    if( image.empty() )
    {
        descriptors.release();
        return;
    }

    // Delegate to detectAndCompute with detection disabled
    // (useProvidedKeypoints = true).
    detectAndCompute(image, noArray(), keypoints, descriptors, true);
}
void cv::gpu::GeneralizedHough_GPU::download(const GpuMat& d_positions, OutputArray h_positions_, OutputArray h_votes_)
{
    // Transfers detection results from device to host. Row 0 of d_positions
    // holds the positions (CV_32FC4); row 1 is reinterpreted as CV_32SC3
    // vote counts when the caller asked for them.
    if (d_positions.empty())
    {
        // No detections: clear whichever outputs the caller requested.
        h_positions_.release();
        if (h_votes_.needed())
            h_votes_.release();
        return;
    }

    CV_Assert(d_positions.rows == 2 && d_positions.type() == CV_32FC4);

    h_positions_.create(1, d_positions.cols, CV_32FC4);
    Mat h_positions = h_positions_.getMat();
    d_positions.row(0).download(h_positions);

    if (h_votes_.needed())
    {
        h_votes_.create(1, d_positions.cols, CV_32SC3);
        Mat h_votes = h_votes_.getMat();
        // Alias the second device row as int3 votes -- no device-side copy;
        // the GpuMat header borrows d_positions' memory.
        GpuMat d_votes(1, d_positions.cols, CV_32SC3, const_cast<int3*>(d_positions.ptr<int3>(1)));
        d_votes.download(h_votes);
    }
}
bool VideoCapture::retrieve(OutputArray image, int channel)
{
    // Decodes the most recently grabbed frame through the legacy C capture
    // API and copies it into `image`. Returns false when no frame is ready.
    IplImage* _img = cvRetrieveFrame(cap, channel);
    if( !_img )
    {
        // No frame available: clear the output so stale data is not reused.
        image.release();
        return false;
    }
    if(_img->origin == IPL_ORIGIN_TL)
        // Top-left origin: the data is already top-down; plain deep copy.
        cv::cvarrToMat(_img).copyTo(image);
    else
    {
        // Bottom-left origin: flip vertically so the output is top-down.
        Mat temp = cv::cvarrToMat(_img);
        flip(temp, image, 0);
    }
    return true;
}
void cv::viz::computeNormals(const Mesh& mesh, OutputArray _normals)
{
    // Computes per-vertex normals for the mesh using VTK's normal filter and
    // returns them converted to the same element type as the mesh cloud.
    vtkSmartPointer<vtkPolyData> polydata = getPolyData(WMesh(mesh));
    vtkSmartPointer<vtkPolyData> with_normals = VtkUtils::ComputeNormals(polydata);

    vtkSmartPointer<vtkDataArray> generic_normals = with_normals->GetPointData()->GetNormals();
    if(generic_normals)
    {
        // One 3-component tuple per vertex; gather into a 1xN CV_64FC3 row.
        Mat normals(1, generic_normals->GetNumberOfTuples(), CV_64FC3);
        Vec3d *optr = normals.ptr<Vec3d>();

        for(int i = 0; i < generic_normals->GetNumberOfTuples(); ++i, ++optr)
            generic_normals->GetTuple(i, optr->val);

        // Match the cloud's type (e.g. CV_32FC3 clouds get float normals).
        normals.convertTo(_normals, mesh.cloud.type());
    }
    else
        _normals.release();  // VTK produced no normals: empty output
}
void UMat::copyTo(OutputArray _dst) const
{
    // Deep-copies this UMat into _dst. Uses a device-side copy when source
    // and destination share an allocator; otherwise downloads to host.
    int dtype = _dst.type();
    if( _dst.fixedType() && dtype != type() )
    {
        // Fixed-type destination with a different depth: delegate to
        // convertTo (channel counts must agree).
        CV_Assert( channels() == CV_MAT_CN(dtype) );
        convertTo( _dst, dtype );
        return;
    }

    if( empty() )
    {
        _dst.release();
        return;
    }

    // Build per-dimension size and source-offset arrays; the innermost
    // dimension is expressed in bytes (scaled by the element size).
    size_t i, sz[CV_MAX_DIM], srcofs[CV_MAX_DIM], dstofs[CV_MAX_DIM], esz = elemSize();
    for( i = 0; i < (size_t)dims; i++ )
        sz[i] = size.p[i];
    sz[dims-1] *= esz;
    ndoffset(srcofs);
    srcofs[dims-1] *= esz;

    _dst.create( dims, size.p, type() );
    if( _dst.isUMat() )
    {
        UMat dst = _dst.getUMat();
        if( u == dst.u && dst.offset == offset )
            return;  // identical buffer and offset: nothing to copy

        if (u->currAllocator == dst.u->currAllocator)
        {
            // Same allocator on both sides: copy entirely on the device.
            dst.ndoffset(dstofs);
            dstofs[dims-1] *= esz;
            u->currAllocator->copy(u, dst.u, dims, sz, srcofs, step.p, dstofs, dst.step.p, false);
            return;
        }
    }

    // Fallback: download the device data into a host-side Mat.
    Mat dst = _dst.getMat();
    u->currAllocator->download(u, dst.data, dims, sz, srcofs, step.p, dst.step.p);
}
bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                    InputArray _cameraMatrix, InputArray _distCoeffs,
                    OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
                    int iterationsCount, float reprojectionError, double confidence,
                    OutputArray _inliers, int flags)
{
    CV_INSTRUMENT_REGION()

    // Robust pose estimation: RANSAC over 3D-2D correspondences, followed by
    // a pose refinement on the inlier subset. Returns true on success and
    // optionally reports the inlier indices through _inliers.
    Mat opoints0 = _opoints.getMat(), ipoints0 = _ipoints.getMat();
    Mat opoints, ipoints;
    // Normalize the inputs to continuous CV_32F so the RANSAC engine can
    // slice them cheaply.
    if( opoints0.depth() == CV_64F || !opoints0.isContinuous() )
        opoints0.convertTo(opoints, CV_32F);
    else
        opoints = opoints0;
    if( ipoints0.depth() == CV_64F || !ipoints0.isContinuous() )
        ipoints0.convertTo(ipoints, CV_32F);
    else
        ipoints = ipoints0;

    // Object points are 3-channel, image points 2-channel, and their counts
    // must match.
    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    CV_Assert(opoints.isContinuous());
    CV_Assert(opoints.depth() == CV_32F || opoints.depth() == CV_64F);
    CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
    CV_Assert(ipoints.isContinuous());
    CV_Assert(ipoints.depth() == CV_32F || ipoints.depth() == CV_64F);
    CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);

    _rvec.create(3, 1, CV_64FC1);
    _tvec.create(3, 1, CV_64FC1);

    // When an extrinsic guess is used, the callback reads the caller's
    // rvec/tvec directly; otherwise work on private temporaries.
    Mat rvec = useExtrinsicGuess ? _rvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat tvec = useExtrinsicGuess ? _tvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

    // Minimal-sample solver: EPnP on 5-point samples by default; with exactly
    // 4 correspondences fall back to P3P on 4-point samples.
    int model_points = 5;
    int ransac_kernel_method = SOLVEPNP_EPNP;

    if( npoints == 4 )
    {
        model_points = 4;
        ransac_kernel_method = SOLVEPNP_P3P;
    }

    Ptr<PointSetRegistrator::Callback> cb; // pointer to callback
    cb = makePtr<PnPRansacCallback>( cameraMatrix, distCoeffs, ransac_kernel_method, useExtrinsicGuess, rvec, tvec);

    double param1 = reprojectionError;                // reprojection error
    double param2 = confidence;                       // confidence
    int param3 = iterationsCount;                     // number maximum iterations

    Mat _local_model(3, 2, CV_64FC1);                 // columns: rvec | tvec
    Mat _mask_local_inliers(1, opoints.rows, CV_8UC1);

    // call Ransac
    int result = createRANSACPointSetRegistrator(cb, model_points,
        param1, param2, param3)->run(opoints, ipoints, _local_model, _mask_local_inliers);

    if( result > 0 )
    {
        // Refine the pose using only the inliers found by RANSAC.
        vector<Point3d> opoints_inliers;
        vector<Point2d> ipoints_inliers;
        opoints = opoints.reshape(3);
        ipoints = ipoints.reshape(2);
        opoints.convertTo(opoints_inliers, CV_64F);
        ipoints.convertTo(ipoints_inliers, CV_64F);

        const uchar* mask = _mask_local_inliers.ptr<uchar>();
        // compressElems compacts the inliers in place and returns their count.
        int npoints1 = compressElems(&opoints_inliers[0], mask, 1, npoints);
        compressElems(&ipoints_inliers[0], mask, 1, npoints);

        opoints_inliers.resize(npoints1);
        ipoints_inliers.resize(npoints1);
        // P3P/AP3P need exactly 3-4 points, so refinement over the full
        // inlier set switches to EPnP for those flags.
        result = solvePnP(opoints_inliers, ipoints_inliers, cameraMatrix,
                          distCoeffs, rvec, tvec, false,
                          (flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P) ? SOLVEPNP_EPNP : flags) ? 1 : -1;
    }

    if( result <= 0 || _local_model.rows <= 0)
    {
        // Failure: return the (possibly guessed) pose and clear inliers.
        _rvec.assign(rvec);    // output rotation vector
        _tvec.assign(tvec);    // output translation vector

        if( _inliers.needed() )
            _inliers.release();

        return false;
    }
    else
    {
        _rvec.assign(_local_model.col(0));    // output rotation vector
        _tvec.assign(_local_model.col(1));    // output translation vector
    }

    if(_inliers.needed())
    {
        // Convert the byte mask into a list of inlier point indices.
        Mat _local_inliers;
        for (int i = 0; i < npoints; ++i)
        {
            if((int)_mask_local_inliers.at<uchar>(i) != 0) // inliers mask
                _local_inliers.push_back(i);    // output inliers vector
        }
        _local_inliers.copyTo(_inliers);
    }
    return true;
}
bool cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                        InputArray _cameraMatrix, InputArray _distCoeffs,
                        OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
                        int iterationsCount, float reprojectionError, double confidence,
                        OutputArray _inliers, int flags)
{
    // RANSAC-based pose estimation from 3D-2D correspondences. The minimal
    // sample size depends on the chosen PnP method (flags). Returns true on
    // success; optionally reports inliers.
    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();

    // Object points must be 3-channel, image points 2-channel, same count.
    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    CV_Assert(opoints.isContinuous());
    CV_Assert(opoints.depth() == CV_32F || opoints.depth() == CV_64F);
    CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
    CV_Assert(ipoints.isContinuous());
    CV_Assert(ipoints.depth() == CV_32F || ipoints.depth() == CV_64F);
    CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);

    _rvec.create(3, 1, CV_64FC1);
    _tvec.create(3, 1, CV_64FC1);

    // With an extrinsic guess the callback must see the caller's rvec/tvec;
    // otherwise use private temporaries.
    Mat rvec = useExtrinsicGuess ? _rvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat tvec = useExtrinsicGuess ? _tvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

    Ptr<PointSetRegistrator::Callback> cb; // pointer to callback
    cb = makePtr<PnPRansacCallback>( cameraMatrix, distCoeffs, flags, useExtrinsicGuess, rvec, tvec);

    // Minimal sample size per method: P3P-style solvers need 4 points,
    // EPNP needs 5, the iterative and UPNP solvers need 6.
    int model_points = 4;                             // minimum of number of model points
    if( flags == cv::SOLVEPNP_ITERATIVE ) model_points = 6;
    else if( flags == cv::SOLVEPNP_UPNP ) model_points = 6;
    else if( flags == cv::SOLVEPNP_EPNP ) model_points = 5;

    double param1 = reprojectionError;                // reprojection error
    double param2 = confidence;                       // confidence
    int param3 = iterationsCount;                     // number maximum iterations

    cv::Mat _local_model(3, 2, CV_64FC1);             // columns: rvec | tvec
    cv::Mat _mask_local_inliers(1, opoints.rows, CV_8UC1);

    // call Ransac
    int result = createRANSACPointSetRegistrator(cb, model_points, param1, param2, param3)->run(opoints, ipoints, _local_model, _mask_local_inliers);

    if( result <= 0 || _local_model.rows <= 0)
    {
        // Failure: hand back the (possibly guessed) pose and clear inliers.
        _rvec.assign(rvec);    // output rotation vector
        _tvec.assign(tvec);    // output translation vector

        if( _inliers.needed() )
            _inliers.release();

        return false;
    }
    else
    {
        _rvec.assign(_local_model.col(0));    // output rotation vector
        _tvec.assign(_local_model.col(1));    // output translation vector
    }

    if(_inliers.needed())
    {
        // Build the list of inlier indices from the byte mask.
        // NOTE(review): the mask is 1 x npoints, so this loop iterates over
        // _mask_local_inliers.rows (== 1) and pushes a running counter, not
        // the point index -- confirm against the newer implementation which
        // iterates over npoints and pushes i.
        Mat _local_inliers;
        int count = 0;
        for (int i = 0; i < _mask_local_inliers.rows; ++i)
        {
            if((int)_mask_local_inliers.at<uchar>(i) == 1) // inliers mask
            {
                _local_inliers.push_back(count);    // output inliers vector
                count++;
            }
        }
        _local_inliers.copyTo(_inliers);
    }
    return true;
}
void convexHull( InputArray _points, OutputArray _hull, bool clockwise, bool returnPoints )
{
    CV_INSTRUMENT_REGION()

    // In-place operation is not supported.
    CV_Assert(_points.getObj() != _hull.getObj());
    Mat points = _points.getMat();
    // Accepts 2D points in either CV_32S or CV_32F.
    int i, total = points.checkVector(2), depth = points.depth(), nout = 0;
    int miny_ind = 0, maxy_ind = 0;
    CV_Assert(total >= 0 && (depth == CV_32F || depth == CV_32S));

    if( total == 0 )
    {
        _hull.release();
        return;
    }

    // A fixed-type output overrides returnPoints: CV_32S output means
    // indices, anything else means points.
    returnPoints = !_hull.fixedType() ? returnPoints : _hull.type() != CV_32S;

    bool is_float = depth == CV_32F;
    AutoBuffer<Point*> _pointer(total);
    AutoBuffer<int> _stack(total + 2), _hullbuf(total);
    Point** pointer = _pointer;
    // Same pointer array reused for the float case (Point and Point2f have
    // identical layout here).
    Point2f** pointerf = (Point2f**)pointer;
    Point* data0 = points.ptr<Point>();
    int* stack = _stack;
    int* hullbuf = _hullbuf;

    CV_Assert(points.isContinuous());
    for( i = 0; i < total; i++ )
        pointer[i] = &data0[i];

    // sort the point set by x-coordinate, find min and max y
    if( !is_float )
    {
        std::sort(pointer, pointer + total, CHullCmpPoints<int>());
        for( i = 1; i < total; i++ )
        {
            int y = pointer[i]->y;
            if( pointer[miny_ind]->y > y )
                miny_ind = i;
            if( pointer[maxy_ind]->y < y )
                maxy_ind = i;
        }
    }
    else
    {
        std::sort(pointerf, pointerf + total, CHullCmpPoints<float>());
        for( i = 1; i < total; i++ )
        {
            float y = pointerf[i]->y;
            if( pointerf[miny_ind]->y > y )
                miny_ind = i;
            if( pointerf[maxy_ind]->y < y )
                maxy_ind = i;
        }
    }

    if( pointer[0]->x == pointer[total-1]->x &&
        pointer[0]->y == pointer[total-1]->y )
    {
        // Degenerate set: all points coincide; the hull is a single point.
        hullbuf[nout++] = 0;
    }
    else
    {
        // upper half: two Sklansky scans meeting at the max-y point.
        int *tl_stack = stack;
        int tl_count = !is_float ?
            Sklansky_( pointer, 0, maxy_ind, tl_stack, -1, 1) :
            Sklansky_( pointerf, 0, maxy_ind, tl_stack, -1, 1);
        int *tr_stack = stack + tl_count;
        int tr_count = !is_float ?
            Sklansky_( pointer, total-1, maxy_ind, tr_stack, -1, -1) :
            Sklansky_( pointerf, total-1, maxy_ind, tr_stack, -1, -1);

        // gather upper part of convex hull to output
        if( !clockwise )
        {
            std::swap( tl_stack, tr_stack );
            std::swap( tl_count, tr_count );
        }

        for( i = 0; i < tl_count-1; i++ )
            hullbuf[nout++] = int(pointer[tl_stack[i]] - data0);
        for( i = tr_count - 1; i > 0; i-- )
            hullbuf[nout++] = int(pointer[tr_stack[i]] - data0);
        // Remember where the upper half ended, to detect collinear sets below.
        int stop_idx = tr_count > 2 ? tr_stack[1] : tl_count > 2 ? tl_stack[tl_count - 2] : -1;

        // lower half: two scans meeting at the min-y point.
        int *bl_stack = stack;
        int bl_count = !is_float ?
            Sklansky_( pointer, 0, miny_ind, bl_stack, 1, -1) :
            Sklansky_( pointerf, 0, miny_ind, bl_stack, 1, -1);
        int *br_stack = stack + bl_count;
        int br_count = !is_float ?
            Sklansky_( pointer, total-1, miny_ind, br_stack, 1, 1) :
            Sklansky_( pointerf, total-1, miny_ind, br_stack, 1, 1);

        if( clockwise )
        {
            std::swap( bl_stack, br_stack );
            std::swap( bl_count, br_count );
        }

        if( stop_idx >= 0 )
        {
            int check_idx = bl_count > 2 ? bl_stack[1] :
            bl_count + br_count > 2 ? br_stack[2-bl_count] : -1;
            if( check_idx == stop_idx || (check_idx >= 0 &&
                                          pointer[check_idx]->x == pointer[stop_idx]->x &&
                                          pointer[check_idx]->y == pointer[stop_idx]->y) )
            {
                // if all the points lie on the same line, then
                // the bottom part of the convex hull is the mirrored top part
                // (except the exteme points).
                bl_count = MIN( bl_count, 2 );
                br_count = MIN( br_count, 2 );
            }
        }

        for( i = 0; i < bl_count-1; i++ )
            hullbuf[nout++] = int(pointer[bl_stack[i]] - data0);
        for( i = br_count-1; i > 0; i-- )
            hullbuf[nout++] = int(pointer[br_stack[i]] - data0);
    }

    if( !returnPoints )
        // Output the hull as indices into the original point array.
        Mat(nout, 1, CV_32S, hullbuf).copyTo(_hull);
    else
    {
        // Output the hull as actual points, in the input depth.
        _hull.create(nout, 1, CV_MAKETYPE(depth, 2));
        Mat hull = _hull.getMat();
        size_t step = !hull.isContinuous() ? hull.step[0] : sizeof(Point);
        for( i = 0; i < nout; i++ )
            *(Point*)(hull.ptr() + i*step) = data0[hullbuf[i]];
    }
}
bool SURF_OCL::computeDescriptors(const UMat &keypoints, OutputArray _descriptors) { int dsize = params->descriptorSize(); int nFeatures = keypoints.cols; if (nFeatures == 0) { _descriptors.release(); return true; } _descriptors.create(nFeatures, dsize, CV_32F); UMat descriptors; if( _descriptors.isUMat() ) descriptors = _descriptors.getUMat(); else descriptors.create(nFeatures, dsize, CV_32F); ocl::Kernel kerCalcDesc, kerNormDesc; if( dsize == 64 ) { kerCalcDesc.create("SURF_computeDescriptors64", ocl::xfeatures2d::surf_oclsrc, kerOpts); kerNormDesc.create("SURF_normalizeDescriptors64", ocl::xfeatures2d::surf_oclsrc, kerOpts); } else { CV_Assert(dsize == 128); kerCalcDesc.create("SURF_computeDescriptors128", ocl::xfeatures2d::surf_oclsrc, kerOpts); kerNormDesc.create("SURF_normalizeDescriptors128", ocl::xfeatures2d::surf_oclsrc, kerOpts); } size_t localThreads[] = {6, 6}; size_t globalThreads[] = {nFeatures*localThreads[0], localThreads[1]}; if(haveImageSupport) { kerCalcDesc.args(imgTex, img_rows, img_cols, ocl::KernelArg::ReadOnlyNoSize(keypoints), ocl::KernelArg::WriteOnlyNoSize(descriptors)); } else { kerCalcDesc.args(ocl::KernelArg::ReadOnlyNoSize(img), img_rows, img_cols, ocl::KernelArg::ReadOnlyNoSize(keypoints), ocl::KernelArg::WriteOnlyNoSize(descriptors)); } if(!kerCalcDesc.run(2, globalThreads, localThreads, true)) return false; size_t localThreads_n[] = {dsize, 1}; size_t globalThreads_n[] = {nFeatures*localThreads_n[0], localThreads_n[1]}; globalThreads[0] = nFeatures * localThreads[0]; globalThreads[1] = localThreads[1]; bool ok = kerNormDesc.args(ocl::KernelArg::ReadWriteNoSize(descriptors)). run(2, globalThreads_n, localThreads_n, true); if(ok && !_descriptors.isUMat()) descriptors.copyTo(_descriptors); return ok; }
void convexityDefects( InputArray _points, InputArray _hull, OutputArray _defects )
{
    CV_INSTRUMENT_REGION()

    // For each hull edge, finds the contour point farthest from that edge
    // and reports (start_idx, end_idx, farthest_idx, fixed-point depth)
    // rows in _defects.
    Mat points = _points.getMat();
    int i, j = 0, npoints = points.checkVector(2, CV_32S);
    CV_Assert( npoints >= 0 );

    if( npoints <= 3 )
    {
        // Triangles and smaller are always convex: no defects.
        _defects.release();
        return;
    }

    Mat hull = _hull.getMat();
    int hpoints = hull.checkVector(1, CV_32S);
    CV_Assert( hpoints > 0 );

    const Point* ptr = points.ptr<Point>();
    const int* hptr = hull.ptr<int>();
    std::vector<Vec4i> defects;

    if ( hpoints < 3 ) //if hull consists of one or two points, contour is always convex
    {
        _defects.release();
        return;
    }

    // 1. recognize co-orientation of the contour and its hull
    // (exactly two of the three comparisons hold for a monotone index cycle)
    bool rev_orientation = ((hptr[1] > hptr[0]) + (hptr[2] > hptr[1]) + (hptr[0] > hptr[2])) != 2;

    // 2. cycle through points and hull, compute defects
    int hcurr = hptr[rev_orientation ? 0 : hpoints-1];
    CV_Assert( 0 <= hcurr && hcurr < npoints );

    for( i = 0; i < hpoints; i++ )
    {
        int hnext = hptr[rev_orientation ? hpoints - i - 1 : i];
        CV_Assert( 0 <= hnext && hnext < npoints );

        // Hull edge from pt0 to pt1; scale normalizes distances to pixels.
        Point pt0 = ptr[hcurr], pt1 = ptr[hnext];
        double dx0 = pt1.x - pt0.x;
        double dy0 = pt1.y - pt0.y;
        double scale = dx0 == 0 && dy0 == 0 ? 0. : 1./std::sqrt(dx0*dx0 + dy0*dy0);

        int defect_deepest_point = -1;
        double defect_depth = 0;
        bool is_defect = false;
        j=hcurr;
        for(;;) // go through points to achieve next hull point
        {
            j++;
            // Wrap j to 0 when it reaches npoints (branch-free modulo).
            j &= j >= npoints ? 0 : -1;
            if( j == hnext )
                break;

            // compute distance from current point to hull edge
            // (perpendicular distance via the 2D cross product)
            double dx = ptr[j].x - pt0.x;
            double dy = ptr[j].y - pt0.y;
            double dist = fabs(-dy0*dx + dx0*dy) * scale;

            if( dist > defect_depth )
            {
                defect_depth = dist;
                defect_deepest_point = j;
                is_defect = true;
            }
        }
        if( is_defect )
        {
            // Depth is stored in 8.8 fixed point.
            int idepth = cvRound(defect_depth*256);
            defects.push_back(Vec4i(hcurr, hnext, defect_deepest_point, idepth));
        }

        hcurr = hnext;
    }

    Mat(defects).copyTo(_defects);
}
void cv::calcMotionGradient( InputArray _mhi, OutputArray _mask, OutputArray _orientation, double delta1, double delta2, int aperture_size ) { static int runcase = 0; runcase++; Mat mhi = _mhi.getMat(); Size size = mhi.size(); _mask.create(size, CV_8U); _orientation.create(size, CV_32F); Mat mask = _mask.getMat(); Mat orient = _orientation.getMat(); if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 ) CV_Error( Error::StsOutOfRange, "aperture_size must be 3, 5 or 7" ); if( delta1 <= 0 || delta2 <= 0 ) CV_Error( Error::StsOutOfRange, "both delta's must be positive" ); if( mhi.type() != CV_32FC1 ) CV_Error( Error::StsUnsupportedFormat, "MHI must be single-channel floating-point images" ); if( orient.data == mhi.data ) { _orientation.release(); _orientation.create(size, CV_32F); orient = _orientation.getMat(); } if( delta1 > delta2 ) std::swap(delta1, delta2); float gradient_epsilon = 1e-4f * aperture_size * aperture_size; float min_delta = (float)delta1; float max_delta = (float)delta2; Mat dX_min, dY_max; // calc Dx and Dy Sobel( mhi, dX_min, CV_32F, 1, 0, aperture_size, 1, 0, BORDER_REPLICATE ); Sobel( mhi, dY_max, CV_32F, 0, 1, aperture_size, 1, 0, BORDER_REPLICATE ); int x, y; if( mhi.isContinuous() && orient.isContinuous() && mask.isContinuous() ) { size.width *= size.height; size.height = 1; } // calc gradient for( y = 0; y < size.height; y++ ) { const float* dX_min_row = dX_min.ptr<float>(y); const float* dY_max_row = dY_max.ptr<float>(y); float* orient_row = orient.ptr<float>(y); uchar* mask_row = mask.ptr<uchar>(y); fastAtan2(dY_max_row, dX_min_row, orient_row, size.width, true); // make orientation zero where the gradient is very small for( x = 0; x < size.width; x++ ) { float dY = dY_max_row[x]; float dX = dX_min_row[x]; if( std::abs(dX) < gradient_epsilon && std::abs(dY) < gradient_epsilon ) { mask_row[x] = (uchar)0; orient_row[x] = 0.f; } else mask_row[x] = (uchar)1; } } erode( mhi, dX_min, noArray(), Point(-1,-1), 
(aperture_size-1)/2, BORDER_REPLICATE ); dilate( mhi, dY_max, noArray(), Point(-1,-1), (aperture_size-1)/2, BORDER_REPLICATE ); // mask off pixels which have little motion difference in their neighborhood for( y = 0; y < size.height; y++ ) { const float* dX_min_row = dX_min.ptr<float>(y); const float* dY_max_row = dY_max.ptr<float>(y); float* orient_row = orient.ptr<float>(y); uchar* mask_row = mask.ptr<uchar>(y); for( x = 0; x < size.width; x++ ) { float d0 = dY_max_row[x] - dX_min_row[x]; if( mask_row[x] == 0 || d0 < min_delta || max_delta < d0 ) { mask_row[x] = (uchar)0; orient_row[x] = 0.f; } } } }
float cv::intersectConvexConvex( InputArray _p1, InputArray _p2, OutputArray _p12, bool handleNested )
{
    CV_INSTRUMENT_REGION();

    // Computes the intersection of two convex polygons; returns its area and
    // optionally the intersection polygon in _p12. With handleNested, a
    // polygon fully inside the other is returned as the intersection.
    Mat p1 = _p1.getMat(), p2 = _p2.getMat();
    CV_Assert( p1.depth() == CV_32S || p1.depth() == CV_32F );
    CV_Assert( p2.depth() == CV_32S || p2.depth() == CV_32F );

    int n = p1.checkVector(2, p1.depth(), true);
    int m = p2.checkVector(2, p2.depth(), true);

    CV_Assert( n >= 0 && m >= 0 );

    if( n < 2 || m < 2 )
    {
        // A polygon needs at least 2 vertices; otherwise no intersection.
        _p12.release();
        return 0.f;
    }

    // One buffer holds both polygons converted to float plus scratch space
    // for the intersection result.
    AutoBuffer<Point2f> _result(n*2 + m*2 + 1);
    Point2f *fp1 = _result.data(), *fp2 = fp1 + n;
    Point2f* result = fp2 + m;

    int orientation = 0;

    for( int k = 1; k <= 2; k++ )
    {
        // Convert each polygon to CV_32F in place (temp aliases dst), and
        // flip it if it is wound the "wrong" way, counting the flips.
        Mat& p = k == 1 ? p1 : p2;
        int len = k == 1 ? n : m;
        Point2f* dst = k == 1 ? fp1 : fp2;

        Mat temp(p.size(), CV_MAKETYPE(CV_32F, p.channels()), dst);
        p.convertTo(temp, CV_32F);
        CV_Assert( temp.ptr<Point2f>() == dst );
        // Determine orientation from the first non-degenerate vertex turn.
        Point2f diff0 = dst[0] - dst[len-1];
        for( int i = 1; i < len; i++ )
        {
            double s = diff0.cross(dst[i] - dst[i-1]);
            if( s != 0 )
            {
                if( s < 0 )
                {
                    orientation++;
                    flip( temp, temp, temp.rows > 1 ? 0 : 1 );
                }
                break;
            }
        }
    }

    float area = 0.f;
    int nr = intersectConvexConvex_(fp1, n, fp2, m, result, &area);
    if( nr == 0 )
    {
        // No edge intersections: either disjoint or one polygon is nested.
        if( !handleNested )
        {
            _p12.release();
            return 0.f;
        }

        if( pointPolygonTest(_InputArray(fp1, n), fp2[0], false) >= 0 )
        {
            // p2 lies inside p1: the intersection is p2 itself.
            result = fp2;
            nr = m;
        }
        else if( pointPolygonTest(_InputArray(fp2, m), fp1[0], false) >= 0 )
        {
            // p1 lies inside p2: the intersection is p1 itself.
            result = fp1;
            nr = n;
        }
        else
        {
            _p12.release();
            return 0.f;
        }
        area = (float)contourArea(_InputArray(result, nr), false);
    }

    if( _p12.needed() )
    {
        Mat temp(nr, 1, CV_32FC2, result);
        // if both input contours were reflected,
        // let's orient the result as the input vectors
        if( orientation == 2 )
            flip(temp, temp, 0);

        temp.copyTo(_p12);
    }
    return (float)fabs(area);
}
void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                        InputArray _cameraMatrix, InputArray _distCoeffs,
                        OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
                        int iterationsCount, float reprojectionError, int minInliersCount,
                        OutputArray _inliers, int flags)
{
    // Legacy RANSAC PnP: runs the solver iterations in parallel via
    // parallel_for, then refines the pose on the inlier set (except for P3P).
    // On failure the pose is reset to identity rotation / zero translation.
    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

    // This variant only accepts single-precision point sets.
    CV_Assert(opoints.isContinuous());
    CV_Assert(opoints.depth() == CV_32F);
    CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
    CV_Assert(ipoints.isContinuous());
    CV_Assert(ipoints.depth() == CV_32F);
    CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);

    _rvec.create(3, 1, CV_64FC1);
    _tvec.create(3, 1, CV_64FC1);
    Mat rvec = _rvec.getMat();
    Mat tvec = _tvec.getMat();

    // Reshape to 1 x N with 3-/2-channel elements for column slicing below.
    Mat objectPoints = opoints.reshape(3, 1), imagePoints = ipoints.reshape(2, 1);

    if (minInliersCount <= 0)
        minInliersCount = objectPoints.cols;  // default: require all points
    cv::pnpransac::Parameters params;
    params.iterationsCount = iterationsCount;
    params.minInliersCount = minInliersCount;
    params.reprojectionError = reprojectionError;
    params.useExtrinsicGuess = useExtrinsicGuess;
    params.camera.init(cameraMatrix, distCoeffs);
    params.flags = flags;

    vector<int> localInliers;
    Mat localRvec, localTvec;
    rvec.copyTo(localRvec);
    tvec.copyTo(localTvec);

    if (objectPoints.cols >= pnpransac::MIN_POINTS_COUNT)
    {
        // Run the RANSAC iterations in parallel; the solver accumulates the
        // best pose and inlier set into localRvec/localTvec/localInliers.
        parallel_for(BlockedRange(0,iterationsCount), cv::pnpransac::PnPSolver(objectPoints, imagePoints,
                                                                               params, localRvec, localTvec, localInliers));
    }

    if (localInliers.size() >= (size_t)pnpransac::MIN_POINTS_COUNT)
    {
        if (flags != CV_P3P)
        {
            // Refine the pose on the inlier correspondences (P3P cannot use
            // more than its minimal set, so it is skipped).
            int i, pointsCount = (int)localInliers.size();
            Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);
            for (i = 0; i < pointsCount; i++)
            {
                int index = localInliers[i];
                Mat colInlierImagePoints = inlierImagePoints(Rect(i, 0, 1, 1));
                imagePoints.col(index).copyTo(colInlierImagePoints);
                Mat colInlierObjectPoints = inlierObjectPoints(Rect(i, 0, 1, 1));
                objectPoints.col(index).copyTo(colInlierObjectPoints);
            }
            solvePnP(inlierObjectPoints, inlierImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, true, flags);
        }
        localRvec.copyTo(rvec);
        localTvec.copyTo(tvec);
        if (_inliers.needed())
            Mat(localInliers).copyTo(_inliers);
    }
    else
    {
        // Not enough inliers: report an identity pose and no inliers.
        tvec.setTo(Scalar(0));
        Mat R = Mat::eye(3, 3, CV_64F);
        Rodrigues(R, rvec);
        if( _inliers.needed() )
            _inliers.release();
    }
    return;
}
/* dst = src */
void Mat::copyTo( OutputArray _dst ) const
{
    // Deep-copies this matrix into _dst. Handles three destinations:
    // a fixed-type target (via convertTo), a UMat (device upload), and a
    // plain Mat (host memcpy).
    int dtype = _dst.type();
    if( _dst.fixedType() && dtype != type() )
    {
        // Fixed-type destination of a different depth: delegate to convertTo
        // (channel counts must agree).
        CV_Assert( channels() == CV_MAT_CN(dtype) );
        convertTo( _dst, dtype );
        return;
    }

    if( empty() )
    {
        // Copying an empty matrix clears the destination.
        _dst.release();
        return;
    }

    if( _dst.isUMat() )
    {
        // Device-backed destination: upload host data through the allocator.
        _dst.create( dims, size.p, type() );
        UMat dst = _dst.getUMat();

        // Sizes in elements, with the innermost dimension in bytes.
        size_t i, sz[CV_MAX_DIM], dstofs[CV_MAX_DIM], esz = elemSize();
        for( i = 0; i < (size_t)dims; i++ )
            sz[i] = size.p[i];
        sz[dims-1] *= esz;
        dst.ndoffset(dstofs);
        dstofs[dims-1] *= esz;
        dst.u->currAllocator->upload(dst.u, data, dims, sz, dstofs, dst.step.p, step.p);
        return;
    }

    if( dims <= 2 )
    {
        _dst.create( rows, cols, type() );
        Mat dst = _dst.getMat();
        if( data == dst.data )
            return;  // same buffer: nothing to copy

        if( rows > 0 && cols > 0 )
        {
            const uchar* sptr = data;
            uchar* dptr = dst.data;

            // Row-wise memcpy; rows collapse into one when both matrices
            // are continuous.
            Size sz = getContinuousSize(*this, dst);
            size_t len = sz.width*elemSize();

            for( ; sz.height--; sptr += step, dptr += dst.step )
                memcpy( dptr, sptr, len );
        }
        return;
    }

    // Multi-dimensional case: copy plane by plane via NAryMatIterator.
    _dst.create( dims, size, type() );
    Mat dst = _dst.getMat();
    if( data == dst.data )
        return;

    if( total() != 0 )
    {
        const Mat* arrays[] = { this, &dst };
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs, 2);
        size_t sz = it.size*elemSize();

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            memcpy(ptrs[1], ptrs[0], sz);
    }
}