Example No. 1
void RotateImage::execute()
{
    const cv::Mat src = mIn;
    int angle = mAngle;
    cv::Mat dest;
    if(angle == 90)
    {
        dest = src.t();
        cv::flip(dest, dest, 1);
    }
    else if(angle == 180)
    {
        cv::flip(src, dest, -1);
    }
    else if(angle == 270)
    {
        dest = src.t();
        cv::flip(dest, dest, 0);
    }
    else
    {
        dest = src;
    }

    mOut.send(dest);
}
Example No. 2
 //
 // concatMatsHorizontal
 //
 cv::Mat concatMatsHorizontal (const cv::Mat &mat_left, const cv::Mat &mat_right)
 {
   CV_Assert (mat_left.rows == mat_right.rows);
   cv::Mat dst_t = mat_left.t ();
   cv::Mat mat_right_t = mat_right.t ();
   dst_t.push_back (mat_right_t);
   return dst_t.t ();
 }
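A minimal usage sketch for the helper above (data values and variable names are illustrative, not from the source; it assumes concatMatsHorizontal from this example is in scope). cv::hconcat produces the same horizontal concatenation without the double transpose:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // Two matrices with the same number of rows (hypothetical data).
    cv::Mat left  = (cv::Mat_<float>(2, 2) << 1, 2, 3, 4);
    cv::Mat right = (cv::Mat_<float>(2, 1) << 5, 6);

    cv::Mat joined = concatMatsHorizontal(left, right); // 2x3: [1 2 5; 3 4 6]
    std::cout << joined << std::endl;

    // Equivalent result with the built-in OpenCV helper.
    cv::Mat joined2;
    cv::hconcat(left, right, joined2);
    return 0;
}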
Example No. 3
float vecDist(cv::Mat vec) {
    assert((vec.rows == 1) || (vec.cols == 1));
    assert(vec.type() == CV_64F);
    cv::Mat results;
    
    if (vec.rows == 1)           // row vector: v * v^T is a 1x1 scalar
        results = vec * vec.t();
    else                         // column vector: v^T * v is a 1x1 scalar
        results = vec.t() * vec;
    return (float) std::sqrt(results.at<double>(0));
}
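In matrix notation the returned value is the Euclidean norm obtained through an inner product; with the corrected branch above, the product is a 1x1 scalar in either orientation:

$\lVert v \rVert \;=\; \sqrt{\textstyle\sum_i v_i^2} \;=\; \begin{cases}\sqrt{v\,v^{\top}} & \text{if } v \text{ is a row vector}\\ \sqrt{v^{\top}v} & \text{if } v \text{ is a column vector}\end{cases}$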
Example No. 4
/**
 * Convert cv::Mat to MxArray
 * @param mat cv::Mat object
 * @param classid classid of mxArray. e.g., mxDOUBLE_CLASS. When mxUNKNOWN_CLASS
 *                is specified, classid will be automatically determined from
 *                the type of cv::Mat. default: mxUNKNOWN_CLASS
 * @param transpose Optional transposition to the return value so that rows and
 *                  columns of the 2D Mat are mapped to the 2nd and 1st
 *                  dimensions in MxArray, respectively. This does not apply to
 *                  the N-D array conversion. default: true.
 * @return MxArray object
 *
 * Convert cv::Mat object to an MxArray. When the cv::Mat object is 2D, the
 * width, height, and channels are mapped to the first, second, and third
 * dimensions of the MxArray unless transpose flag is set to false. When the
 * cv::Mat object is N-D, (dim 1, dim 2,...dim N, channels) are mapped to (dim
 * 2, dim 1, ..., dim N, dim N+1), respectively.
 *
 * Example:
 * @code
 * cv::Mat x(120, 90, CV_8UC3, Scalar(0));
 * mxArray* plhs[0] = MxArray(x);
 * @endcode
 *
 */
MxArray::MxArray(const cv::Mat& mat, mxClassID classid, bool transpose)
{
	if (mat.empty()) {
		p_ = mxCreateNumericArray(0,0,mxDOUBLE_CLASS,mxREAL);
		if (!p_)
			mexErrMsgIdAndTxt("mexopencv:error","Allocation error");
		return;
	}
#if CV_MINOR_VERSION >= 2
	const cv::Mat& rm = (mat.dims==2 && transpose) ? cv::Mat(mat.t()) : mat;
#else
	const cv::Mat& rm = (transpose) ? cv::Mat(mat.t()) : mat;
#endif
	
	// Create a new mxArray
	int nchannels = rm.channels();
#if CV_MINOR_VERSION >= 2
	const int* dims_ = rm.size;
	vector<mwSize> d(dims_,dims_+rm.dims);
#else
	int dims_[] = {rm.rows,rm.cols};
	vector<mwSize> d(dims_,dims_+2);
#endif
	d.push_back(nchannels);
	classid = (classid == mxUNKNOWN_CLASS) ? ClassIDOf[rm.depth()] : classid;
	std::swap(d[0],d[1]);
	p_ = (classid==mxLOGICAL_CLASS) ?
		mxCreateLogicalArray(d.size(),&d[0]) :
		mxCreateNumericArray(d.size(),&d[0],classid,mxREAL);
	if (!p_)
		mexErrMsgIdAndTxt("mexopencv:error","Allocation error");
	
	// Copy each channel
	std::vector<cv::Mat> mv;
	cv::split(rm,mv);
	std::vector<mwSize> si(d.size(),0);      // subscript index
	int type = CV_MAKETYPE(DepthOf[classid],1); // destination type
	for (int i = 0; i < nchannels; ++i) {
		si[si.size()-1] = i; // last dim is a channel index
		void *ptr = reinterpret_cast<void*>(
				reinterpret_cast<size_t>(mxGetData(p_))+
				mxGetElementSize(p_)*subs(si));
#if CV_MINOR_VERSION >= 2
		cv::Mat m(rm.dims,dims_,type,ptr);
#else
		cv::Mat m(dims_[0],dims_[1],type,ptr);
#endif
		mv[i].convertTo(m,type); // Write to mxArray through m
	}
}
Example No. 5
void ofxIcp::compute_covariance_matrix(const vector<cv::Point3d> & points_1, const cv::Mat & centroid_1, const vector<cv::Point3d> & points_2, const cv::Mat & centroid_2, cv::Mat & output){
    
    for(int i = 0; i < points_1.size(); i++){
        cv::Mat mat2 = centroid_1.t() - cv::Mat(points_1[i], false).reshape(1);
        cv::Mat mat1 = centroid_2.t() - cv::Mat(points_2[i], false).reshape(1);
        
        cv::Mat mat3 = mat1*mat2.t();
        
        if(i == 0){
            output = mat3;
        }else{
            output += mat3;
        }
    }
}
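Assuming centroid_1 and centroid_2 are 1x3 row vectors (so their transposes are 3x1 columns, matching the reshaped points), the accumulated output is the 3x3 cross-covariance matrix used by the SVD-based alignment step of ICP:

$H \;=\; \sum_{i}\left(\bar{p}^{(2)} - p^{(2)}_i\right)\left(\bar{p}^{(1)} - p^{(1)}_i\right)^{\top} \;=\; \sum_{i}\left(p^{(2)}_i - \bar{p}^{(2)}\right)\left(p^{(1)}_i - \bar{p}^{(1)}\right)^{\top}$

where $\bar{p}^{(k)}$ denotes the centroid of point set $k$.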
Example No. 6
void Triangulator::triangulateFromUpVp(cv::Mat &up, cv::Mat &vp, cv::Mat &xyz){

    std::cerr << "WARNING! NOT FULLY IMPLEMENTED!" << std::endl;
    int N = up.rows * up.cols;

    cv::Mat projPointsCam(2, N, CV_32F);
    // uc and vc are presumably per-pixel camera coordinates kept as class members (they are not defined in this snippet).
    uc.reshape(0,1).copyTo(projPointsCam.row(0));
    vc.reshape(0,1).copyTo(projPointsCam.row(1));

    cv::Mat projPointsProj(2, N, CV_32F);
    up.reshape(0,1).copyTo(projPointsProj.row(0));
    vp.reshape(0,1).copyTo(projPointsProj.row(1));

    cv::Mat Pc(3,4,CV_32F,cv::Scalar(0.0));
    cv::Mat(calibration.Kc).copyTo(Pc(cv::Range(0,3), cv::Range(0,3)));

    cv::Mat Pp(3,4,CV_32F), temp(3,4,CV_32F);
    cv::Mat(calibration.Rp).copyTo(temp(cv::Range(0,3), cv::Range(0,3)));
    cv::Mat(calibration.Tp).copyTo(temp(cv::Range(0,3), cv::Range(3,4)));
    Pp = cv::Mat(calibration.Kp) * temp;

    cv::Mat xyzw;
    cv::triangulatePoints(Pc, Pp, projPointsCam, projPointsProj, xyzw);

    xyz.create(3, N, CV_32F);
    for(int i=0; i<N; i++){
        xyz.at<float>(0,i) = xyzw.at<float>(0,i)/xyzw.at<float>(3,i);
        xyz.at<float>(1,i) = xyzw.at<float>(1,i)/xyzw.at<float>(3,i);
        xyz.at<float>(2,i) = xyzw.at<float>(2,i)/xyzw.at<float>(3,i);
    }

    xyz = xyz.t();
    xyz = xyz.reshape(3, up.rows);
}
Example No. 7
cv::Mat Transformations::inv(const cv::Mat &aTb)
{
  // inv(T) = [R^t | -R^t p]
  const cv::Mat R = aTb.rowRange(0,3).colRange(0,3);
  const cv::Mat t = aTb.rowRange(0,3).colRange(3,4);
 
  cv::Mat Rt = R.t();
  cv::Mat t2 = -Rt*t;
  
  cv::Mat ret;
  if(aTb.type() == CV_32F)
  {
    ret = (cv::Mat_<float>(4,4) <<
      Rt.at<float>(0,0), Rt.at<float>(0,1), Rt.at<float>(0,2), t2.at<float>(0,0),
      Rt.at<float>(1,0), Rt.at<float>(1,1), Rt.at<float>(1,2), t2.at<float>(1,0),
      Rt.at<float>(2,0), Rt.at<float>(2,1), Rt.at<float>(2,2), t2.at<float>(2,0),
      0, 0, 0, 1);
  
  }else
  {
    ret = (cv::Mat_<double>(4,4) <<
      Rt.at<double>(0,0), Rt.at<double>(0,1), Rt.at<double>(0,2), t2.at<double>(0,0),
      Rt.at<double>(1,0), Rt.at<double>(1,1), Rt.at<double>(1,2), t2.at<double>(1,0),
      Rt.at<double>(2,0), Rt.at<double>(2,1), Rt.at<double>(2,2), t2.at<double>(2,0),
      0, 0, 0, 1);
  }

  return ret;
}
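The closed form in the "inv(T) = [R^t | -R^t p]" comment follows from the block structure of a rigid-body transform; since $R$ is orthonormal, $R^{-1} = R^{\top}$:

$T = \begin{bmatrix} R & t \\ 0 & 1 \end{bmatrix}, \qquad T^{-1} = \begin{bmatrix} R^{\top} & -R^{\top}t \\ 0 & 1 \end{bmatrix}$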
Example No. 8
bool motionEstimationEssentialMat (const std::vector<cv::Point2f>& points_1,
                                   const std::vector<cv::Point2f>& points_2,
                                   const cv::Mat& F,
                                   const cv::Mat& K,
                                   cv::Mat& T, cv::Mat& R)
{
    // calculate essential mat
    cv::Mat E = K.t() * F * K; //according to HZ (9.12)

    // decompose right solution for R and T values and saved it to P1. get point cloud of triangulated points
    cv::Mat P;
    std::vector<cv::Point3f> pointCloud;
    bool goodPFound = getRightProjectionMat(E, P, K, points_1, points_2, pointCloud);

    if (!goodPFound) {
        cout << "NO MOVEMENT: no perspective Mat Found" << endl;
        return false;
    }

    cv::Mat T_temp, R_temp;
    decomposeProjectionMat(P, T_temp, R_temp);

    //delete y motion
    //T_temp.at<float>(1)=0;
    T = T_temp;
    R = R_temp;

    return true;
}
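The relation used on the first line of the function is the standard link between the fundamental and essential matrices of a calibrated camera pair, as the HZ (9.12) comment notes:

$E \;=\; K^{\top} F\, K$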
Example No. 9
 void KeyFrame::SetPose(const cv::Mat &Rcw,const cv::Mat &tcw)
 {
     boost::mutex::scoped_lock lock(mMutexPose);
     Rcw.copyTo(Tcw.rowRange(0,3).colRange(0,3));
     tcw.copyTo(Tcw.col(3).rowRange(0,3));
     
     Ow=-Rcw.t()*tcw;
 }
Example No. 10
EstimateCameraPose::EstimateCameraPose(std::vector<cv::Point2f> points1, std::vector<cv::Point2f> points2, cv::Mat K)
{
	matchedpoints1 = points1;
	matchedpoints2 = points2;
	intrinsic = K;
	fundamental = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, Mask);
	E = K.t() * fundamental * K;
}
Example No. 11
    /* "pattern": row input vector, size <1>x<number visible units>
     * OUT "activation": row vector, size <1>x<number hidden units>
     * "mc_steps": number of markov chain monte carlo steps before sampling. DEFAULT: 1
     */
    void RBMbb::hidden_activations_for(const cv::Mat& pattern, cv::Mat& activations, bool probabilities, int mc_steps)
    {    
        cv::Mat visible_state = pattern.t();

        for(int step=0; step < mc_steps; step++)
        {
            if(step == mc_steps-1 && probabilities)
            {
                visible_to_hidden_probabilities_logistic(visible_state, activations);
                break;
            }

            binary_sample_hidden_state(visible_state, activations);
            binary_sample_visible_state(activations, visible_state);
        }
        activations = activations.t();
    }
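visible_to_hidden_probabilities_logistic is not shown here; for a standard Bernoulli-Bernoulli RBM (the symbols below follow the usual convention and are not taken from this code base) the hidden activation probabilities it would return are

$p(h_j = 1 \mid v) \;=\; \sigma\!\left(c_j + \sum_i W_{ij}\, v_i\right), \qquad \sigma(x) = \frac{1}{1 + e^{-x}}$

with $W$ the weight matrix and $c$ the hidden bias vector; binary_sample_hidden_state would then sample each $h_j$ from this Bernoulli distribution.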
Example No. 12
    /* "pattern": row input vector, size <1>x<number visible units>
     * OUT "reconstruction": row vector, size <1>x<number visible units>
     * "mc_steps": number of markov chain monte carlo steps before sampling. DEFAULT: 1
     */
    void RBMbb::reconstruct(const cv::Mat& pattern, cv::Mat& reconstruction, int mc_steps)
    {
        reconstruction = pattern.t();
        cv::Mat hidden_state;

        for(int step=0; step < mc_steps; step++)
        {
            binary_sample_hidden_state(reconstruction, hidden_state);
            if(step == mc_steps-1)
            {
                hidden_to_visible_probabilities_logistic(hidden_state, reconstruction);
                break;
            }
            binary_sample_visible_state(hidden_state, reconstruction);
        }
        reconstruction = reconstruction.t();
    }
Example No. 13
cv::Mat invRtTransformMatrix(const cv::Mat& rot, const cv::Mat& trans) {
    cv::Mat result = cv::Mat::eye(4, 4, trans.depth());
    cv::Mat invR = rot.t();
    cv::Mat invT = -invR * trans;
    invR.copyTo(result(cv::Range(0, 3), cv::Range(0, 3)));
    invT.copyTo(result(cv::Range(0, 3), cv::Range(3, 4)));
    return result;
}
Example No. 14
void rotate (cv::Mat& src, cv::Mat& dst) {

	if(angle >= 360) {
		angle -= 360;
	}
	cout << "angle = " << angle << endl;	

	if(angle == 90) {
		flip(src.t(), dst, 1);
	}
	else if(angle == 180) {
		flip(src, dst, -1);	
	}
	else if(angle == 270) {
		flip(src.t(), dst, 0);
	}
	
}
Example No. 15
void ofxIcp::compute_rigid_transformation(const vector<cv::Point3d> & points_1, const cv::Mat & centroid_1, const vector<cv::Point3d> & points_2, const cv::Mat & centroid_2, cv::Mat & rotation, cv::Mat & translation){
    
    cv::Mat covariance_matrix;
    compute_covariance_matrix(points_1, centroid_1, points_2, centroid_2, covariance_matrix);
    
    compute_rotation_matrix(covariance_matrix, rotation);
    
    cv::Mat rot_centroid_1_mat = rotation * centroid_1.t();
    translation = centroid_2 - rot_centroid_1_mat.t();
}
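Once the rotation $R$ has been recovered from the covariance matrix, the translation is the offset between the rotated source centroid and the target centroid; the transposes in the code only account for the centroids being stored as row vectors:

$t \;=\; \bar{p}^{(2)} - R\,\bar{p}^{(1)}$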
Example No. 16
//==============================================================================
void 
myInitFunc(cv::Mat &/*im*/,   //image containing object
	   cv::Mat &s,    //current shape describing object
	   cv::Mat &dxdp, //shape model jacobian at shape
	   cv::Mat &H,    //contains Hessian on return
	   cv::Mat &g,    //contains gradient on return
	   void* data)    //additional data
{
  myInitData* d = (myInitData*)data;
  // if(d->calculate){
  //   d->pra->Predict(im,s,d->mu,d->cov,d->covi);
  //   if(d->mu.rows == 0){
  //     H = cv::Mat::zeros(dxdp.cols,dxdp.cols,CV_64F);
  //     g = cv::Mat::zeros(dxdp.cols,1,CV_64F);
  //   }else{H = dxdp.t()*d->covi*dxdp; g = dxdp.t()*d->covi*(d->mu - s); }
  // }else
  {H = dxdp.t()*d->covi*dxdp; g = dxdp.t()*d->covi*(d->mu - s); }
  d->calculate = false; return;
}
Example No. 17
void transform (cv::Mat points3d, cv::Mat R, cv::Mat t,
                cv::Mat &points3d_out) {
    int n = points3d.rows;
    points3d_out = Mat (n, 3, matrix_type);
    for ( int i = 0; i < n; ++i ) {
        // product of transposes: (R * pt.t() + t.t()).t() == pt * R.t() + t
        Mat res = points3d.row(i) * R.t() + t;
        res.copyTo(points3d_out.row(i));
    }
}
Example No. 18
float vecCosine(cv::Mat vec1, cv::Mat vec2) {
    assert((vec1.rows == vec2.rows) && (vec1.cols == vec2.cols));
    assert(vec1.type() == vec2.type());
    assert(vec1.type() == CV_64F);
    
    cv::Mat m1, d1, d2;
    double d1_s, d2_s;
    
    m1 = (vec1 * vec2.t());
    assert((m1.rows == 1) && (m1.cols == 1));
    d1 = (vec1*vec1.t());
    assert((d1.rows == 1) && (d1.cols == 1));
    d1_s = sqrt(d1.at<double>(0,0));
    d2 = (vec2*vec2.t());
    assert((d2.rows == 1) && (d2.cols == 1));
    d2_s = sqrt(d2.at<double>(0,0));
    
    return (float) (m1.at<double>(0,0) / (double) (d1_s * d2_s));
}
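For the 1xN row vectors assumed by the asserts, the value returned is the cosine similarity:

$\cos\theta \;=\; \frac{v_1 v_2^{\top}}{\sqrt{v_1 v_1^{\top}}\,\sqrt{v_2 v_2^{\top}}} \;=\; \frac{\langle v_1, v_2\rangle}{\lVert v_1\rVert\,\lVert v_2\rVert}$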
Example No. 19
double logVecGausPDF(cv::Mat vec, cv::Mat mean, cv::Mat stddev) {
    cv::Mat covar = cv::Mat::diag(stddev.t());
    cv::Mat result = -.5 * (vec - mean) * covar.inv() * (vec - mean).t();
    
    double det = cv::determinant(covar);
    
    // Normalization constant of the Gaussian: 1 / ((2*pi)^(d/2) * sqrt(det(covar))).
    double x1 = 1.0 / (std::pow(2 * M_PI, vec.cols / 2.0) * std::sqrt(det));
        
    return result.at<double>(0,0) + std::log(x1);
}
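With vec and mean as 1xd row vectors and $\Sigma$ the diagonal matrix built from stddev, the quantity being assembled is the log-density of a multivariate normal, which is why the normalization constant divides by $\sqrt{\det\Sigma}$ (as in the corrected line above):

$\log \mathcal{N}(x;\mu,\Sigma) \;=\; -\tfrac{1}{2}\,(x-\mu)\,\Sigma^{-1}(x-\mu)^{\top} \;+\; \log\frac{1}{(2\pi)^{d/2}\sqrt{\det\Sigma}}$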
Example No. 20
void CovarianceModel::addAppearance(cv::Mat & img, const cv::Rect &detBox)
{
  cv::Mat newCovMat = cv::Mat::zeros(5,5, CV_64FC1);

  cv::Mat target = (detBox == cv::Rect()) ? img : img(detBox);
  cv::cvtColor(target, target, CV_RGB2GRAY);

  const int totalPoints = target.rows * target.cols;

  _Ix.create(target.rows, target.cols, CV_64FC1);
  _Iy.create(target.rows, target.cols, CV_64FC1);
  _Ixx.create(target.rows, target.cols, CV_64FC1);
  _Iyy.create(target.rows, target.cols, CV_64FC1);
  _Ixy.create(target.rows, target.cols, CV_64FC1);

  cv::filter2D(target, _Ix, CV_64FC1, _kernelX);
  cv::filter2D(target, _Iy, CV_64FC1, _kernelY);
  cv::filter2D(target, _Ixx, CV_64FC1, _kernelXX);
  cv::filter2D(target, _Iyy, CV_64FC1, _kernelYY);
  cv::Sobel(target, _Ixy, CV_64FC1, 1, 1);

  cv::Mat meanVec(5, 1, CV_64FC1);
  meanVec.at<double>(0,0) = cv::mean(_Ix)(0); // cv::Scalar s; s(0) will give the first element in the scalar
  meanVec.at<double>(1,0) = cv::mean(_Iy)(0);
  meanVec.at<double>(2,0) = cv::mean(_Ixx)(0);
  meanVec.at<double>(3,0) = cv::mean(_Iyy)(0);
  meanVec.at<double>(4,0) = cv::mean(_Ixy)(0);

  cv::Mat pix(5, 1, CV_64FC1);

  for(int i=0; i<target.rows; ++i)
  {
    for(int j=0; j<target.cols; ++j)
    {
      pix.at<double>(0,0) = _Ix.at<double>(i,j);
      pix.at<double>(1,0) = _Iy.at<double>(i,j);
      pix.at<double>(2,0) = _Ixx.at<double>(i,j);
      pix.at<double>(3,0) = _Iyy.at<double>(i,j);
      pix.at<double>(4,0) = _Ixy.at<double>(i,j);

      const cv::Mat pixMinMeanVec = pix-meanVec;
      const cv::Mat currCovMat = (pixMinMeanVec * pixMinMeanVec.t());

      newCovMat += currCovMat;
    }
  }
  newCovMat /= (totalPoints-1);

  _covMatrices.push_back(newCovMat);

  if(_covMatrices.size() > 2)
    _updateGaussian();
}
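Each pixel contributes a 5-dimensional feature vector $f_k = (I_x, I_y, I_{xx}, I_{yy}, I_{xy})^{\top}$, so the accumulated matrix is the sample covariance of these features over the target region, i.e. the region covariance descriptor:

$C \;=\; \frac{1}{N-1}\sum_{k=1}^{N}\left(f_k - \mu\right)\left(f_k - \mu\right)^{\top}, \qquad \mu = \frac{1}{N}\sum_{k=1}^{N} f_k$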
Example No. 21
mat Converter::cvMat2AdMat(const cv::Mat& cvmat){
    /**
     * Convert an OpenCV Mat to an Armadillo mat.
     * The OpenCV Mat entries should be uchars.
     * Note that Armadillo's mat is Mat<double>
     */

    // Set to column-based order.
    cv::Mat cvmatTmp = cvmat.t();
    arma::Mat<uchar> admatUchar(reinterpret_cast<uchar*> (cvmatTmp.data), cvmat.rows, cvmat.cols);
    return conv_to<mat>::from(admatUchar);
}
Example No. 22
void odomError(cv::Mat &FDrl, cv::Mat &Ed, cv::Mat &Ep) {
    Ep = FDrl*Ed*FDrl.t();

//    cout << "Ed:\n" << Ed.at<float>(0,0) << " | "  << Ed.at<float>(0,1) << endl;
//    cout << Ed.at<float>(1,0) << " | "  << Ed.at<float>(1,1) << endl;
//    
//    cout << "FDrl:\n" << FDrl.at<float>(0,0) << " | "  << FDrl.at<float>(0,1) << endl;
//    cout << FDrl.at<float>(1,0) << " | "  << FDrl.at<float>(1,1) << endl;
//    cout << FDrl.at<float>(2,0) << " | "  << FDrl.at<float>(2,1) << endl;
//    
//    cout << "Ep:\n" << Ep.at<float>(0,0) << " | "  << Ep.at<float>(0,1) << " | "  << Ep.at<float>(0,2) << endl;
//    cout << Ep.at<float>(1,0) << " | "  << Ep.at<float>(1,1) << " | "  << Ep.at<float>(1,2) << endl;
//    cout << Ep.at<float>(2,0) << " | "  << Ep.at<float>(2,1) << " | "  << Ep.at<float>(2,2) << endl; 
}
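This is first-order (Jacobian-based) uncertainty propagation: if the pose increment is a function of the wheel displacements with Jacobian $F_{\Delta rl}$, the pose covariance follows from the displacement covariance as

$E_p \;\approx\; F_{\Delta rl}\, E_d\, F_{\Delta rl}^{\top}$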
Example No. 23
std::vector<cv::Point2d> matToPoints(cv::Mat m)
{
    std::vector<cv::Point2d> pts;
    cv::Point2d pt; // Point2d keeps full double precision for the dehomogenized coordinates

    if(m.rows==3) m=m.t();
    int rows=m.rows;
    for(int r=0; r<rows; r++){
        pt.x=m.at<double_t>(r,0)/m.at<double_t>(r,2);
        pt.y=m.at<double_t>(r,1)/m.at<double_t>(r,2);
        pts.push_back(pt);
    }

    return pts;
}
Example No. 24
bool SolveProjectionFromF(const cv::Mat &F, cv::Mat &P1, cv::Mat &P2) {
  P1 = cv::Mat::eye(3, 4, CV_64F);
  P2 = cv::Mat::zeros(3, 4, CV_64F);
  cv::Mat e2 = cv::Mat::zeros(3, 1, CV_64F);
  cv::SVD::solveZ(F.t(), e2);
  // TODO: Verify e2 is valid.
  cv::Mat P33 = P2(cv::Rect(0, 0, 3, 3));
  P33 = SkewSymmetricMatrix(e2) * F;

  e2.copyTo(P2(cv::Rect(3, 0, 1, 3)));

  std::cout << "Compute P from F...\nP1:\n" << P1 << "\nP2:\n" << P2
            << std::endl;

  return true;
}
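The code builds the canonical camera pair that Hartley & Zisserman derive from a fundamental matrix: $e'$ is the left epipole, i.e. the null vector of $F^{\top}$ recovered by cv::SVD::solveZ, and

$P_1 = [\,I \mid 0\,], \qquad P_2 = [\,[e']_{\times}F \mid e'\,]$

where $[e']_{\times}$ is the skew-symmetric cross-product matrix built by SkewSymmetricMatrix.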
Example No. 25
MxArray::MxArray(const cv::Mat& mat, mxClassID classid, bool transpose)
{
    if (mat.empty())
    {
        p_ = mxCreateNumericArray(0, 0, mxDOUBLE_CLASS, mxREAL);
        if (!p_)
            mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
        return;
    }
    cv::Mat input = (mat.dims == 2 && transpose) ? mat.t() : mat;
    // Create a new mxArray.
    const int nchannels = input.channels();
    const int* dims_ = input.size;
    std::vector<mwSize> d(dims_, dims_ + input.dims);
    d.push_back(nchannels);
    classid = (classid == mxUNKNOWN_CLASS)
        ? ClassIDOf[input.depth()] : classid;
    std::swap(d[0], d[1]);
    if (classid == mxLOGICAL_CLASS)
    {
        // OpenCV's logical true is any nonzero while matlab's true is 1.
        cv::compare(input, 0, input, cv::CMP_NE);
        input.setTo(1, input);
        p_ = mxCreateLogicalArray(d.size(), &d[0]);
    }
    else {
        p_ = mxCreateNumericArray(d.size(), &d[0], classid, mxREAL);
    }
    if (!p_)
        mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
    // Copy each channel.
    std::vector<cv::Mat> channels;
    split(input, channels);
    std::vector<mwSize> si(d.size(), 0); // subscript index.
    int type = CV_MAKETYPE(DepthOf[classid], 1); // destination type.
    for (int i = 0; i < nchannels; ++i)
    {
        si[si.size() - 1] = i; // last dim is a channel index.
        void *ptr = reinterpret_cast<void*>(
                reinterpret_cast<size_t>(mxGetData(p_)) +
                mxGetElementSize(p_) * subs(si));
        cv::Mat m(input.dims, dims_, type, ptr);
        channels[i].convertTo(m, type); // Write to mxArray through m.
    }
}
Example No. 26
cv::Mat PCANet::trainLDA(const cv::Mat &features, const cv::Mat &labels, int dimensionLDA)
{
    PROFILE;

    cv::Mat evals, evecs;
    eigen(cv::Mat(features.t() * features), evals, evecs);

    for (int i=0; i<dimensionLDA; i++)
    {
        projVecPCA.push_back(evecs.row(i));
    }

    cv::LDA lda;
    lda.compute(cv::Mat(features * projVecPCA.t()), labels);
    lda.eigenvectors().convertTo(projVecLDA, CV_32F);

    return features * projVecPCA.t() * projVecLDA;
}
Example No. 27
static cv::Mat hist(const cv::Mat &mat, int range)
{
    cv::Mat mt = mat.t();
    cv::Mat hist = cv::Mat::zeros(mt.rows, range + 1, CV_32F);

    for (int i=0; i<mt.rows; i++)
    {
        const float *m = mt.ptr<float>(i);
        float *h = hist.ptr<float>(i);

        for (int j=0; j<mt.cols; j++)
        {
            h[int(m[j])] += 1;
        }
    }

    return hist.t();
}
Example No. 28
//==============================================================================
void 
myTrackFunc2(cv::Mat &/*im*/,   //image containing object
	     cv::Mat &s,    //current shape describing object
	     cv::Mat &dxdp, //shape model jacobian at shape
	     cv::Mat &H,    //contains Hessian on return
	     cv::Mat &g,    //contains gradient on return
	     void* data)    //additional data
{
  myTrackData2* d = (myTrackData2*)data;
  cv::Mat pose = d->tracker->_clm._pglobl(cv::Rect(0,1,1,3));
  d->tracker->_atm.BuildLinearSystem(d->img,s,dxdp,pose,H,g);   
  //  if(d->calculate){d->tracker->_ksmooth.Predict(im,s,d->mu,d->cov,d->covi);}
  if(d->mu.rows > 0){
    H *= d->gamma; g *= d->gamma;
    H += (1.0-d->gamma)*dxdp.t()*d->covi*dxdp;
    g += (1.0-d->gamma)*dxdp.t()*d->covi*(d->mu - s); 
  }
  d->calculate = false; return;
}
Example No. 29
Mat Classifier::svm_test(cv::Mat& testData, cv::Mat& testClasses, std::string fileName)
{
    // SVM load
    CvSVM svm;

    svm.load(fileName.c_str());

    //Mat prediccion(testClasses.rows,testClasses.cols,CV_32FC1);
    Mat prediccion;

    for (int i=0; i<=testClasses.rows-1; ++i)
    {
        prediccion.push_back(svm.predict(testData.row(i)));
    }

    cout << "Matriz Clases: " << testClasses.t() << endl;
    cout << "Matriz Predic: " << prediccion.t() << endl;
    return prediccion;
}
Example No. 30
void fast_hammingnorm_match_neighborhood(const cv::Mat& des1, const cv::Mat& des2, const cv::Mat& neighborhood,
										 ListIdxCorrespondance& _list, const float ratio, const int parscore)
{
	cv::setNumThreads(3);
	MatchList good;
	good.clear();
	bool flipcorrespondence = false;
	if ( des1.rows <= des2.rows ){
		HammingMatchBatch _hmtbb(des1, des2,neighborhood, ratio, parscore);
		cv::parallel_for_(cv::Range(0, des1.rows), _hmtbb, 3);
		_hmtbb.getResult(good);
	} else {
		flipcorrespondence = true;
		cv::Mat _neight=neighborhood.t();
		HammingMatchBatch _hmtbb(des2, des1, _neight, ratio, parscore);
		cv::parallel_for_(cv::Range(0, des2.rows), _hmtbb, 3);
		_hmtbb.getResult(good);
	}	
	correspondance_list(good, _list, flipcorrespondence);
}