/**
 * @brief  Update the CamShift tracker's hue histogram from the given object region.
 * @param  img   Input BGR image.
 * @param  obj   Bounding rectangle of the tracked object.
 * @param  hist  Output hue histogram, scaled to the range [0, 255].
 * @return bool, whether the CamShift tracker has been updated or not.
 */
bool CTrackingAlgs::CamshiftUpdateTracker(const cv::Mat& img,
                                          const cv::Rect& obj,
                                          cv::MatND& hist)
{
    cv::Mat hsv, hue, mask, backproject;
    cv::cvtColor( img, hsv, CV_BGR2HSV );

    int _vmin = CTrackingAlgs::vmin, _vmax = CTrackingAlgs::vmax;

    cv::inRange( hsv,
                 cv::Scalar(0, CTrackingAlgs::smin, MIN(_vmin,_vmax), 0),
                 cv::Scalar(180, 256, MAX(_vmin,_vmax), 0),
                 mask );
    std::vector<cv::Mat> vhsv(3);
    cv::split( hsv, vhsv );
    vhsv[0].copyTo(hue);

    double max_val = 0.0;
    cv::Mat roi      = hue( cv::Range(obj.y, obj.y+obj.height),
                            cv::Range(obj.x, obj.x+obj.width) );
    cv::Mat roi_mask = mask( cv::Range(obj.y, obj.y+obj.height),
                             cv::Range(obj.x, obj.x+obj.width) );
    cv::calcHist( &roi, 1, CTrackingAlgs::channels, roi_mask,
                  hist, 1, CTrackingAlgs::histSize, CTrackingAlgs::ranges,
                  true,    // the histogram is uniform
                  false );
    cv::minMaxLoc(hist, 0, &max_val, 0, 0);
    hist.convertTo(hist, hist.type(), (max_val ? 255. / max_val : 0.), 0);

    return true;
}
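// A minimal usage sketch (not part of the original class): the histogram that
// CamshiftUpdateTracker fills is typically consumed by back-projecting the hue
// channel and feeding the result to cv::CamShift. This assumes the method and
// the members channels/ranges are static and accessible, as the qualified
// accesses above suggest; frame and initialObjectRect are illustrative names.
cv::MatND hist;
cv::Rect trackWindow = initialObjectRect;   // assumed initial object rectangle
CTrackingAlgs::CamshiftUpdateTracker(frame, trackWindow, hist);

cv::Mat hsv, hue, backproject;
cv::cvtColor(frame, hsv, CV_BGR2HSV);
std::vector<cv::Mat> vhsv(3);
cv::split(hsv, vhsv);
vhsv[0].copyTo(hue);

cv::calcBackProject(&hue, 1, CTrackingAlgs::channels, hist,
                    backproject, CTrackingAlgs::ranges);
cv::RotatedRect trackBox = cv::CamShift(backproject, trackWindow,
    cv::TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));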
/**
 * Convert cv::MatND to MxArray
 * @param mat     cv::MatND object
 * @param classid classid of mxArray, e.g., mxDOUBLE_CLASS. When mxUNKNOWN_CLASS
 *                is specified, classid is determined automatically from the
 *                type of the cv::MatND. default: mxUNKNOWN_CLASS
 * @return MxArray object
 */
MxArray::MxArray(const cv::MatND& mat, mxClassID classid)
{
    if (mat.datastart == mat.dataend) {
        p_ = mxCreateNumericArray(0, 0, mxDOUBLE_CLASS, mxREAL);
        return;
    }
    // Create a new mxArray
    int nchannels = mat.channels();
    const int* dims_ = mat.size;
    vector<mwSize> d(dims_, dims_ + mat.dims);
    d.push_back(nchannels);
    classid = (classid == mxUNKNOWN_CLASS) ? ClassIDOf[mat.depth()] : classid;
    std::swap(d[0], d[1]);
    p_ = mxCreateNumericArray(d.size(), &d[0], classid, mxREAL);
    if (!p_)
        mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
    // Copy
    int depth = CV_MAKETYPE(DepthOf[classid], 1);
    cv::MatND m(mat.dims, mat.size, CV_MAKETYPE(depth, 1));
    uchar* _data = m.data;
    uchar* _datastart = m.datastart;
    uchar* _dataend = m.dataend;
    m.data = m.datastart = reinterpret_cast<uchar*>(mxGetData(p_));
    m.dataend = reinterpret_cast<uchar*>(
        reinterpret_cast<size_t>(mxGetData(p_)) + mxGetElementSize(p_)*numel());
    mat.convertTo(m, CV_MAKETYPE(depth, 1));
    m.data = _data;
    m.datastart = _datastart;
    m.dataend = _dataend;
}
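// A hedged sketch of how this constructor might be used inside a MEX gateway:
// a histogram computed with OpenCV is handed back to MATLAB, assuming MxArray's
// usual implicit conversion to mxArray*. The input handling follows the common
// mexopencv pattern; the histogram parameters here are illustrative.
void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    cv::Mat img = MxArray(prhs[0]).toMat(CV_8U);    // assumed 8-bit input image
    int channels[] = {0};
    int histSize[] = {256};
    float range[] = {0, 256};
    const float* ranges[] = {range};
    cv::MatND hist;
    cv::calcHist(&img, 1, channels, cv::Mat(), hist, 1, histSize, ranges);
    plhs[0] = MxArray(hist);    // classid defaults to mxUNKNOWN_CLASS
}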
std::vector<jsk_recognition_msgs::HistogramWithRangeBin>
cvMatNDToHistogramWithRangeBinArray(const cv::MatND& cv_hist,
                                    float min_value, float max_value)
{
    std::vector<jsk_recognition_msgs::HistogramWithRangeBin> bins(cv_hist.total());
    const float bin_width = (max_value - min_value) / cv_hist.total();
    for (size_t i = 0; i < cv_hist.total(); i++) {
        const float left  = i * bin_width + min_value;
        const float right = (i + 1) * bin_width + min_value;
        jsk_recognition_msgs::HistogramWithRangeBin bin;
        bin.min_value = left;
        bin.max_value = right;
        bin.count = cv_hist.at<float>(0, i);
        bins[i] = bin;
    }
    return bins;
}
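// A minimal sketch of converting an OpenCV histogram into the ROS message bins
// above. The input image, value range and bin count are illustrative; only
// cvMatNDToHistogramWithRangeBinArray comes from the original code.
cv::Mat depth_image;                            // CV_32FC1 input, assumed
const float min_value = 0.0f, max_value = 10.0f;
int channels[] = {0};
int histSize[] = {100};
float range[] = {min_value, max_value};
const float* ranges[] = {range};
cv::MatND hist;
cv::calcHist(&depth_image, 1, channels, cv::Mat(), hist, 1, histSize, ranges);
std::vector<jsk_recognition_msgs::HistogramWithRangeBin> bins =
    cvMatNDToHistogramWithRangeBinArray(hist, min_value, max_value);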
cv::MatND MxArray::toMatND(int depth, bool transpose) const
{
    // Create cv::MatND object (of the specified depth), equivalent to mxArray
    std::vector<int> d(dims(), dims() + ndims());
    std::swap(d[0], d[1]);
    depth = (depth == CV_USRTYPE1) ? DepthOf[classID()] : depth;
    cv::MatND mat(d.size(), &d[0], CV_MAKETYPE(depth, 1));
    // Copy from mxArray to cv::MatND (converting to specified depth)
    const int type = CV_MAKETYPE(DepthOf[classID()], 1);        // source type
    const cv::MatND m(d.size(), &d[0], type, mxGetData(p_));    // only Mat header
    // Read from mxArray through m, writing into mat
    m.convertTo(mat, CV_MAKETYPE(depth, 1));
    // transpose cv::MatND if needed
    if (mat.dims == 2 && transpose)
        mat = mat.t();
    return mat;
}
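// A hedged sketch of using toMatND in a MEX gateway: two histograms received
// from MATLAB are converted to cv::MatND and compared. Only toMatND is from
// the original code; the gateway itself is illustrative.
void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    cv::MatND h1 = MxArray(prhs[0]).toMatND(CV_32F);
    cv::MatND h2 = MxArray(prhs[1]).toMatND(CV_32F);
    double d = cv::compareHist(h1, h2, CV_COMP_CORREL);
    plhs[0] = MxArray(d);    // assumes the usual MxArray(double) constructor
}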
// It is not clear whether MatND uses the little-endian dimension order
// (i.e. dim 0 is the lowest dimension and dim nd-1 the highest) or the
// big-endian order, so the order is detected from the instance itself.
// Note: http://code.google.com/p/pyopencv/issues/detail?id=18
bool get_array_data_arrangement(cv::MatND const &inst,
                                sdcpp::array_data_arrangement &result)
{
    if (!inst.flags)
        return false;

    bool endianness = (inst.dims >= 2) && (inst.step[0] > inst.step[1]);
    bool multichannel = inst.channels() > 1;
    int i;

    result.item_size = inst.elemSize1();
    result.ndim = inst.dims + multichannel;
    result.size.resize(result.ndim);
    result.stride.resize(result.ndim);

    if (multichannel) {
        result.size[inst.dims] = inst.channels();
        result.stride[inst.dims] = inst.elemSize1();
    }

    if (endianness) {
        result.total_size = inst.size[0]*inst.step[0];
        for (i = 0; i < inst.dims; ++i) {
            result.size[i] = inst.size[i];
            result.stride[i] = inst.step[i];
        }
    } else {
        result.total_size = inst.size[inst.dims-1]*inst.step[inst.dims-1];
        for (i = 0; i < inst.dims; ++i) {
            result.size[i] = inst.size[inst.dims-1-i];
            result.stride[i] = inst.step[inst.dims-1-i];
        }
    }
    return true;
}
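// A small illustrative probe of the helper above on a 2-D, 3-channel MatND.
// The expected values in the comments follow directly from the function body;
// the field layout of sdcpp::array_data_arrangement is otherwise assumed.
int sz[] = {480, 640};
cv::MatND img(2, sz, CV_8UC3);
sdcpp::array_data_arrangement arr;
if (get_array_data_arrangement(img, arr)) {
    // arr.ndim   == 3
    // arr.size   == {480, 640, 3}   (rows, cols, channels)
    // arr.stride == {1920, 3, 1}    (bytes)
    // arr.item_size == 1, arr.total_size == 480 * 1920
}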
double HullToObjectModule::histogramDistance(cv::MatND h1, cv::MatND h2)
{
    cv::MatND d(h1.size(), CV_32FC1);
    cv::absdiff(h1, h2, d);
    cv::Scalar s = cv::sum(d);
    return s.val[0];
}

double setDegu::histogramDistance(cv::MatND h1, cv::MatND h2)
{
    cv::MatND d(h1.size(), CV_32FC1);
    cv::absdiff(h1, h2, d);
    cv::Scalar s = cv::sum(d);
    return s.val[0];
}

double regionalSegmentationModule::histogramDistance(cv::MatND h1, cv::MatND h2)
{
    cv::MatND d(h1.size(), CV_32FC1);
    cv::absdiff(h1, h2, d);
    cv::Scalar s = cv::sum(d);
    return s.val[0];
}
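// An illustrative note on the three identical helpers above: the returned value
// is the L1 distance (sum of absolute differences) between two equally sized
// single-channel float histograms, i.e. the same quantity as
// cv::norm(h1, h2, cv::NORM_L1). The surrounding setup is assumed.
cv::MatND h1, h2;                 // histograms built elsewhere, same size/type
// ... fill h1 and h2, e.g. with cv::calcHist ...
HullToObjectModule module;        // assumed default-constructible; any of the
                                  // three classes behaves identically here
double d1 = module.histogramDistance(h1, h2);
double d2 = cv::norm(h1, h2, cv::NORM_L1);   // equivalent for CV_32FC1 data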