Example no. 1
void ExtendedKalmanFilter::correctionStep(const Eigen::Vector3f& measurement, const Eigen::Vector3f& global_marker_pose){  // compare expected and measured values, update state and uncertainty


    // compute expected measurement:
    Vector3f z_exp; // z_exp = h(x)
    z_exp(0) = cos(state(2))*(global_marker_pose(0)- state(0)) + sin(state(2))*(global_marker_pose(1)-state(1));
    z_exp(1) = -sin(state(2))*(global_marker_pose(0) -state(0)) + cos(state(2))*(global_marker_pose(1)-state(1));
    z_exp(2) =  global_marker_pose(2)-state(2);

    Matrix3f H; // dh/dx
    H << -cos(state(2)), -sin(state(2)), -sin(state(2))*(global_marker_pose(0) - state(0)) + cos(state(2))*(global_marker_pose(1)-state(1)),
            sin(state(2)), -cos(state(2)), -cos(state(2))*(global_marker_pose(0) -state(0)) - sin(state(2))*(global_marker_pose(1)-state(1)),
            0,                  0, -1;

    // cout << "H: " << endl << H << endl;

    Matrix3f K = sigma * H.transpose() * ((H * sigma * H.transpose() + R).inverse()); // Kalman Gain

    // compute the difference between expectation and observation
    Vector3f z_diff = measurement - z_exp;
    // normalize angle
    z_diff(2) = atan2(sin(z_diff(2)),cos(z_diff(2)));

    // cout << "z_exp: " << z_exp(0) << " "<< z_exp(1) << " "<< z_exp(2) << endl;
    // cout << "diff: " << z_diff(0) << " "<< z_diff(1) << " "<< z_diff(2) << endl;

    // correct pose estimate
    state = state + K*( z_diff );
    sigma = (Matrix3f::Identity() - K*H)*sigma;


}
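For reference, the correction step above is the standard EKF measurement update with the marker observed in the robot frame; a sketch of the equations the code evaluates, in the same symbols:

\hat{z} = h(x), \qquad K = \Sigma H^\top \left( H \Sigma H^\top + R \right)^{-1}, \qquad x \leftarrow x + K\,(z - \hat{z}), \qquad \Sigma \leftarrow (I - K H)\,\Sigma

where H = \partial h / \partial x is the Jacobian assembled above and the angular component of z - \hat{z} is wrapped with atan2 before the update.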
Example no. 2
float mmf::OptSO3vMFCF::conjugateGradientCUDA_impl(Matrix3f& R, float res0,
    uint32_t n, uint32_t maxIter) {
  Eigen::Matrix3f N = Eigen::Matrix3f::Zero();
  // tauR_*R^T is the contribution of the motion prior between two
  // frames; it regularizes the solution when data exists only on
  // certain axes
  if (this->t_ >= 1) N += tauR_*R.transpose();
  for (uint32_t j=0; j<6; ++j) { 
    Eigen::Vector3f m = Eigen::Vector3f::Zero();
    m(j/2) = j%2==0?1.:-1.;
    N += m*this->cld_.xSums().col(j).transpose();
  }
  Eigen::JacobiSVD<Eigen::Matrix3f> svd(N, Eigen::ComputeFullU|Eigen::ComputeFullV); // full U/V: thin unitaries are not available for fixed-size matrices
  if (svd.matrixV().determinant()*svd.matrixU().determinant() > 0)
    R = svd.matrixV()*svd.matrixU().transpose();
  else
    R = svd.matrixV()*Eigen::Vector3f(1.,1.,-1.).asDiagonal()*svd.matrixU().transpose();

//  if (R.determinant() < 0.) {
//    //R *= -1.;
////    R = svd.matrixV()*Eigen::Vector3f(1.,1.,-1.).asDiagonal()*svd.matrixU().transpose();
//    std::cout << "determinant of R < 0" << std::endl;
//  }
//  std::cout << R.determinant() << std::endl;
//  std::cout << "N" << std::endl << N << std::endl;
//  std::cout << "R" << std::endl << R << std::endl;
//  std::cout << this->cld_.xSums() << std::endl;
  return (N*R).trace();
}
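The closed-form step above is the usual orthogonal Procrustes / Wahba solution: with the SVD N = U S V^\top, the rotation maximizing the returned objective \operatorname{tr}(N R) is, as a sketch of what the two branches compute,

R = V \operatorname{diag}\!\big(1,\, 1,\, \det(V U^\top)\big)\, U^\top,

i.e. V U^\top when \det(U)\det(V) > 0 and the sign-corrected product otherwise.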
Example no. 3
void Deform::update_Ri()
{
    Matrix3f Si;
    MatrixXf Di;
    Matrix3Xf Pi_Prime;
    Matrix3Xf Pi;
    for (int i = 0; i != P_Num; ++i) {
        Di = MatrixXf::Zero(adj_list[i].size(), adj_list[i].size());
        Pi_Prime.resize(3, adj_list[i].size());
        Pi.resize(3, adj_list[i].size());
        // if there are no isolated (unconnected) points, this loop could be written more efficiently
        for (decltype(adj_list[i].size()) j = 0; j != adj_list[i].size(); ++j) {
            Di(j, j) = Weight.coeffRef(i, adj_list[i][j]);
            Pi.col(j) = P.col(i) - P.col(adj_list[i][j]);
            Pi_Prime.col(j) = P_Prime.col(i) - P_Prime.col(adj_list[i][j]);
        }
        Si = Pi * Di * Pi_Prime.transpose();
        Matrix3f Ui;
        Vector3f Wi;
        Matrix3f Vi;
        wunderSVD3x3(Si, Ui, Wi, Vi);
        R[i] = Vi * Ui.transpose();

        if (R[i].determinant() < 0)
            std::cout << "determinant is negative!" << std::endl;
    }
}
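update_Ri looks like the per-vertex rotation fit of an as-rigid-as-possible (ARAP) style deformation; as a sketch, each R_i is the rotation best aligning the original edge fan with the deformed one in the weighted least-squares sense:

S_i = \sum_{j \in \mathcal{N}(i)} w_{ij}\, e_{ij}\, {e'_{ij}}^\top = P_i D_i {P'_i}^\top, \qquad S_i = U_i \Sigma_i V_i^\top, \qquad R_i = V_i U_i^\top,

with e_{ij} = p_i - p_j and e'_{ij} = p'_i - p'_j as built in the inner loop (the determinant check above only warns about the reflection case).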
void CCubicGrids::integrateFeatures( const pcl::device::Intr& cCam_, const Matrix3f& Rw_, const Vector3f& Tw_, int nFeatureScale_,
												  GpuMat& pts_curr_, GpuMat& nls_curr_,
												  GpuMat& gpu_key_points_curr_, const GpuMat& gpu_descriptor_curr_, const GpuMat& gpu_distance_curr_, const int nEffectiveKeypoints_,
												  GpuMat& gpu_inliers_, const int nTotalInliers_){

	using namespace btl::device;

	//Note: the point cloud in cFrame_ must be transformed into the world frame before calling this function, i.e. it integrates a VMap/NMap in world coordinates into the volume in world coordinates
	//get VMap and NMap in world
	Matrix3f tmp = Rw_;
	pcl::device::Mat33& devRwCurTrans = pcl::device::device_cast<pcl::device::Mat33> ( tmp );	//the device cast does the transpose implicitly because eimcmRwCur is column-major by default.
	//Cw = -Rw'*Tw
	Eigen::Vector3f eivCwCur = - Rw_.transpose() * Tw_ ;
	float3& devCwCur = pcl::device::device_cast<float3> (eivCwCur);
	
	//locate the volume coordinate for each feature, if the feature falls outside the volume, just remove it.
	cuda_nonmax_suppress_n_integrate ( _intrinsics, devRwCurTrans, devCwCur, _VolumeResolution,
								_gpu_YXxZ_tsdf, _fVolumeSizeM, _fVoxelSizeM,
								_fFeatureVoxelSizeM, _nFeatureScale, _vFeatureResolution, _gpu_YXxZ_vg_idx,
								pts_curr_, nls_curr_, gpu_key_points_curr_, gpu_descriptor_curr_, gpu_distance_curr_, nEffectiveKeypoints_,
								gpu_inliers_, nTotalInliers_, &_gpu_feature_volume_coordinate, &_gpu_counter,
								&_vTotalGlobal, &_gpu_global_3d_key_points, &_gpu_global_descriptors);

	PRINT(_vTotalGlobal[0]);
	return;
}
Example no. 5
/// Stabilizes mount relative to the Earth's frame
/// Inputs:
///    _roll_control_angle   desired roll       angle in radians,
///    _tilt_control_angle   desired tilt/pitch angle in radians,
///    _pan_control_angle    desired pan/yaw    angle in radians
/// Outputs:
///    _roll_angle           stabilized roll       angle in degrees,
///    _tilt_angle           stabilized tilt/pitch angle in degrees,
///    _pan_angle            stabilized pan/yaw    angle in degrees
void
AP_Mount::stabilize()
{
#if MNT_STABILIZE_OPTION == ENABLED
    // only do the full 3D frame transform if we are doing pan control
    if (_stab_pan) {
        Matrix3f m;                         ///< holds 3 x 3 matrix, var is used as temp in calcs
        Matrix3f cam;                       ///< Rotation matrix earth to camera. Desired camera from input.
        Matrix3f gimbal_target;             ///< Rotation matrix from plane to camera. Then Euler angles to the servos.
        m = _ahrs.get_dcm_matrix();
        m.transpose();
        cam.from_euler(_roll_control_angle, _tilt_control_angle, _pan_control_angle);
        gimbal_target = m * cam;
        gimbal_target.to_euler(&_roll_angle, &_tilt_angle, &_pan_angle);
        _roll_angle  = _stab_roll ? degrees(_roll_angle) : degrees(_roll_control_angle);
        _tilt_angle  = _stab_tilt ? degrees(_tilt_angle) : degrees(_tilt_control_angle);
        _pan_angle   = degrees(_pan_angle);
    } else {
        // otherwise base mount roll and tilt on the ahrs
        // roll/tilt attitude, plus any requested angle
        _roll_angle  = degrees(_roll_control_angle);
        _tilt_angle  = degrees(_tilt_control_angle);
        _pan_angle   = degrees(_pan_control_angle);
        if (_stab_roll) {
            _roll_angle -= degrees(_ahrs.roll);
        }
        if (_stab_tilt) {
            _tilt_angle -= degrees(_ahrs.pitch);
        }
    }
#endif
}
Example no. 6
float OptSO3::linesearch(Matrix3f& R, Matrix3f& M_t_min, const Matrix3f& H, 
    float N, float t_max, float dt)
{
  Matrix3f A = R.transpose() * H;

  EigenSolver<MatrixXf> eig(A);
  MatrixXcf U = eig.eigenvectors();
  MatrixXcf invU = U.inverse();
  VectorXcf d = eig.eigenvalues();
#ifndef NDEBUG
  cout<<"A"<<endl<<A<<endl;
  cout<<"U"<<endl<<U<<endl;
  cout<<"d"<<endl<<d<<endl;
#endif

  Matrix3f R_t_min=R;
  float f_t_min = 999999.0f;
  float t_min = 0.0f;
  //for(int i_t =0; i_t<10; i_t++)
  for(float t =0.0f; t<t_max; t+=dt)
  {
    //float t= ts[i_t];
    VectorXcf expD = ((d*t).array().exp());
    MatrixXf MN = (U*expD.asDiagonal()*invU).real();
    Matrix3f R_t = R*MN.topLeftCorner(3,3);

    float detR = R_t.determinant();
    float maxDeviationFromI = ((R_t*R_t.transpose() 
          - Matrix3f::Identity()).cwiseAbs()).maxCoeff();
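    // note: R_t(0,0)==R_t(0,0) below is false only when R_t contains NaNs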
    if ((R_t(0,0)==R_t(0,0)) 
        && (abs(detR-1.0f)< 1e-2) 
        && (maxDeviationFromI <1e-1))
    {
      float f_t = evalCostFunction(R_t)/float(N);
#ifndef NDEBUG
      cout<< " f_t = "<<f_t<<endl;
#endif
      if (f_t_min > f_t && f_t != 0.0f)
      {
        R_t_min = R_t;
        M_t_min = MN.topLeftCorner(3,3);
        f_t_min = f_t;
        t_min = t;
      }
    }else{
      cout<<"R_t is corruputed detR="<<detR
        <<"; max deviation from I="<<maxDeviationFromI 
        <<"; nans? "<<R_t(0,0)<<" f_t_min="<<f_t_min<<endl;
    }
  }
  if(f_t_min == 999999.0f) return f_t_min; // early out: MN was NaN/corrupted for every t
  R = R_t_min;
#ifndef NDEBUG
#endif
  cout<<"R: det(R) = "<<R.determinant()<<endl<<R<<endl;
  cout<< "t_min="<<t_min<<" f_t_min="<<f_t_min<<endl;
  return f_t_min; 
}
Vector3f CalibrationNode::getLogTheta(Matrix3f R){

	//Assumption: R is never the identity matrix
	float theta = acos((R.trace()-1)/2);
	Matrix3f logTheta = 0.5*theta/sin(theta)*(R-R.transpose());
	return Vector3f(logTheta(2,1), logTheta(0,2), logTheta(1,0));

}
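getLogTheta evaluates the SO(3) logarithm followed by the vee operator; a sketch of the identity it implements, valid away from theta = 0 and theta = pi (hence the assumption that R is never the identity):

\theta = \arccos\!\left( \frac{\operatorname{tr}(R) - 1}{2} \right), \qquad \log R = \frac{\theta}{2 \sin\theta}\,\big(R - R^\top\big), \qquad (\log R)^\vee = \frac{\theta}{2 \sin\theta} \begin{pmatrix} R_{32} - R_{23} \\ R_{13} - R_{31} \\ R_{21} - R_{12} \end{pmatrix}.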
Matrix4f Camera::getViewMatrix() const {
   Matrix3f camCoords;
   camCoords[0] = orthoNormRightDirection;
   camCoords[1] = orthoNormUpDirection;
   camCoords[2] = orthoNormViewDirection;
   Matrix4f camTranslate = makeTranslation4f(-position);
   return expand3To4(camCoords.transpose())*camTranslate;
}
Example no. 9
void OptSO3::updateGandH(Matrix3f& G, Matrix3f& G_prev, Matrix3f& H, 
    const Matrix3f& R, const Matrix3f& J, const Matrix3f& M_t_min,
    bool resetH)
{
  G_prev = G;
  G = J - R * J.transpose() * R;
  G = R*enforceSkewSymmetry(R.transpose()*G);

  if(resetH)
  {
    H= -G;
  }else{
    Matrix3f tauH = H * M_t_min; //- R_prev * (RR.transpose() * N_t_min);
    float gamma = ((G-G_prev)*G).trace()/(G_prev*G_prev).trace();
    H = -G + gamma * tauH;
    H = R*enforceSkewSymmetry(R.transpose()*H);
  }
}
Example no. 10
void RealtimeMF_openni::normals_cb(float* d_normals, uint8_t* haveData,
    uint32_t w, uint32_t h)
{
  tLog_.tic(-1); // reset all timers
  int32_t nComp = 0;
  float* d_nComp = this->normalExtract->d_normalsComp(nComp);
  Matrix3f kRwBefore = kRw_;
  tLog_.toctic(0,1);

  optSO3_->updateExternalGpuNormals(d_nComp,nComp,3,0);
  double residual = optSO3_->conjugateGradientCUDA(kRw_,nCGIter_);
  double D_KL = optSO3_->D_KL_axisUnif();
  tLog_.toctic(1,2);

  {
    boost::mutex::scoped_lock updateLock(this->updateModelMutex);
    this->normalsImg_ = this->normalExtract->normalsImg();
    if(z_.rows() != w*h) z_.resize(w*h);
    this->normalExtract->uncompressCpu(optSO3_->z().data(), optSO3_->z().rows(),
        z_.data(), z_.rows());

    mfAxes_ = MatrixXf::Zero(3,6);
    for(uint32_t k=0; k<6; ++k){
      int j = k/2; // which of the rotation columns does this belong to
      float sign = (- float(k%2) +0.5f)*2.0f; // sign of the axis
      mfAxes_.col(k) = sign*kRw_.col(j);
    }
    D_KL_= D_KL;
    residual_ = residual;
    this->update_ = true;
    updateLock.unlock();
  }

  tLog_.toc(2); // total time
  tLog_.logCycle();
  cout<<"delta rotation kRw_ = \n"<<kRwBefore*kRw_.transpose()<<endl;
  cout<<"---------------------------------------------------------------------------"<<endl;
  tLog_.printStats();
  cout<<" residual="<<residual_<<"\t D_KL="<<D_KL_<<endl;
  cout<<"---------------------------------------------------------------------------"<<endl;
  fout_<<D_KL_<<" "<<residual_<<endl; fout_.flush();

////  return kRw_;
//  {
//    boost::mutex::scoped_lock updateLock(this->updateModelMutex);
////    pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr nDispPtr =
////      normalExtract->normalsPc();
////    nDisp_ = pcl::PointCloud<pcl::PointXYZRGB>::Ptr( new
////        pcl::PointCloud<pcl::PointXYZRGB>(*nDispPtr));
////    this->normalsImg_ = this->normalExtract->normalsImg();
//    this->update_ = true;
//  }
};
Example no. 11
void ModelRender::render(WorldGraphics *world, const Environment *envi, const Matrix4f &mvp, unsigned int)
{
    if(!world->model.isVisible()) return;

    program.bind();
    Matrix4f mat=mvp;
    Matrix4f nmat;
    Matrix3f mv;

    Model * tmp;

    int count;
    Model ** list=world->model.renderList(count);
    for(int i=0;i<count;i++)
    {
        tmp=list[i];
        mat=tmp->getMatrix()*mvp;

        nmat=tmp->getMatrix();
        nmat.inverse();

        mv[0]=nmat[0];
        mv[1]=nmat[1];
        mv[2]=nmat[2];

        mv[3]=nmat[4];
        mv[4]=nmat[5];
        mv[5]=nmat[6];

        mv[6]=nmat[8];
        mv[7]=nmat[9];
        mv[8]=nmat[10];
        mv.transpose();

        if(tmp==world->model.hightlighted())
            program.uniform(uniform_selected,true);
        else
            program.uniform(uniform_selected,false);
        program.uniformMatrix(uniform_mv,mv);
        program.uniformMatrix(uniform_mvp,mat);
        program.uniformMatrix(uniform_model,tmp->getMatrix());

        ModelMesh * mesh=tmp->getMesh();
        render(mesh);
    }

    program.release();
}
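The 3x3 mv extracted from nmat above is the usual normal matrix: for a model matrix M, normals are transformed by

N = \big( M^{-1} \big)_{3 \times 3}^{\top},

the transpose of the inverse restricted to the upper-left 3x3 block, which is what the inverse(), element-copy and transpose() sequence computes (assuming this Matrix4f/Matrix3f API inverts and transposes in place, as the calls suggest).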
Example no. 12
void ModelRender::render(const Matrix4f &mvp, unsigned int)
{
    program.bind();
    Matrix4f mat=mvp;
    Matrix4f nmat;
    Matrix3f mv;

    glEnableVertexAttribArray(attribute_v_coord);
    glEnableVertexAttribArray(attribute_normal);
    glEnableVertexAttribArray(attribute_texcoord);

    Model * tmp;
    while(base->renderCount()>0)
    {
        tmp=base->popRender();
        mat=tmp->getMatrix()*mvp;

        nmat=tmp->getMatrix();
        nmat.inverse();

        mv[0]=nmat[0];
        mv[1]=nmat[1];
        mv[2]=nmat[2];

        mv[3]=nmat[4];
        mv[4]=nmat[5];
        mv[5]=nmat[6];

        mv[6]=nmat[8];
        mv[7]=nmat[9];
        mv[8]=nmat[10];
        mv.transpose();

        program.uniform(uniform_selected,tmp->isSelected());
        program.uniformMatrix(uniform_mvp,mat);
        program.uniformMatrix(uniform_mv,mv);

        renderModel(tmp);
        tmp=tmp->next;
    }

    glDisableVertexAttribArray(attribute_v_coord);
    glDisableVertexAttribArray(attribute_normal);
    glDisableVertexAttribArray(attribute_texcoord);

    program.unbind();
}
Example no. 13
void UKFPose2D::poseSensorUpdate(const Vector3f& reading, const Matrix3f& readingCov)
{
  generateSigmaPoints();

  // computePoseReadings
  Vector3f poseReadings[7];
  for(int i = 0; i < 7; ++i)
    poseReadings[i] = sigmaPoints[i];

  // computeMeanOfPoseReadings
  Vector3f poseReadingMean = poseReadings[0];
  for(int i = 1; i < 7; ++i)
    poseReadingMean += poseReadings[i];
  poseReadingMean *= 1.f / 7.f;

  // computeCovOfPoseReadingsAndSigmaPoints
  Matrix3f poseReadingAndMeanCov = Matrix3f::Zero();
  for(int i = 0; i < 3; ++i)
  {
    const Vector3f d1 = poseReadings[i * 2 + 1] - poseReadingMean;
    poseReadingAndMeanCov += (Matrix3f() << d1 * l(0, i), d1 * l(1, i), d1 * l(2, i)).finished();
    const Vector3f d2 = poseReadings[i * 2 + 2] - poseReadingMean;
    poseReadingAndMeanCov += (Matrix3f() << d2 * -l(0, i), d2 * -l(1, i), d2 * -l(2, i)).finished();
  }
  poseReadingAndMeanCov *= 0.5f;

  // computeCovOfPoseReadingsReadings
  Matrix3f poseReadingCov;
  const Vector3f d = poseReadings[0] - poseReadingMean;
  poseReadingCov << d * d.x(), d * d.y(), d * d.z();
  for(int i = 1; i < 7; ++i)
  {
    const Vector3f d = poseReadings[i] - poseReadingMean;
    poseReadingCov += (Matrix3f() << d * d.x(), d * d.y(), d * d.z()).finished();
  }
  poseReadingCov *= 0.5f;

  poseReadingMean.z() = Angle::normalize(poseReadingMean.z());
  const Matrix3f kalmanGain = poseReadingAndMeanCov.transpose() * (poseReadingCov + readingCov).inverse();
  Vector3f innovation = reading - poseReadingMean;
  innovation.z() = Angle::normalize(innovation.z());
  const Vector3f correction = kalmanGain * innovation;
  mean += correction;
  mean.z() = Angle::normalize(mean.z());
  cov -= kalmanGain * poseReadingAndMeanCov;
  Covariance::fixCovariance(cov);
}
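poseSensorUpdate is an unscented-filter measurement update specialized to a direct pose observation; as a sketch of what the accumulated sums estimate,

K = \Sigma_{xz} \big( \Sigma_{zz} + R \big)^{-1}, \qquad x \leftarrow x + K\,(z - \bar{z}), \qquad \Sigma \leftarrow \Sigma - K\, \Sigma_{xz}^\top,

where \bar{z} is the sigma-point measurement mean, \Sigma_{xz} the state-measurement cross covariance (poseReadingAndMeanCov transposed), \Sigma_{zz} the measurement covariance (poseReadingCov), and the angular components are normalized before and after the correction.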
Example no. 14
/// Stabilizes mount relative to the Earth's frame
/// Inputs:
///    _roll_control_angle   desired roll       angle in radians,
///    _tilt_control_angle   desired tilt/pitch angle in radians,
///    _pan_control_angle    desired pan/yaw    angle in radians
/// Outputs:
///    _roll_angle           stabilized roll       angle in degrees,
///    _tilt_angle           stabilized tilt/pitch angle in degrees,
///    _pan_angle            stabilized pan/yaw    angle in degrees
void
AP_Mount::stabilize()
{
#if MNT_STABILIZE_OPTION == ENABLED
    // only do the full 3D frame transform if we are doing pan control
    if (_stab_pan) {
        Matrix3f m;                         ///< holds 3 x 3 matrix, var is used as temp in calcs
        Matrix3f cam;                       ///< Rotation matrix earth to camera. Desired camera from input.
        Matrix3f gimbal_target;             ///< Rotation matrix from plane to camera. Then Euler angles to the servos.
        m = _ahrs.get_dcm_matrix();
        m.transpose();
        cam.from_euler(_roll_control_angle, _tilt_control_angle, _pan_control_angle);
        gimbal_target = m * cam;
        gimbal_target.to_euler(&_roll_angle, &_tilt_angle, &_pan_angle);
        _roll_angle  = _stab_roll ? degrees(_roll_angle) : degrees(_roll_control_angle);
        _tilt_angle  = _stab_tilt ? degrees(_tilt_angle) : degrees(_tilt_control_angle);
        _pan_angle   = degrees(_pan_angle);
    } else {
        // otherwise base mount roll and tilt on the ahrs
        // roll/tilt attitude, plus any requested angle
        _roll_angle  = degrees(_roll_control_angle);
        _tilt_angle  = degrees(_tilt_control_angle);
        _pan_angle   = degrees(_pan_control_angle);
        if (_stab_roll) {
            _roll_angle -= degrees(_ahrs.roll);
        }
        if (_stab_tilt) {
            _tilt_angle -= degrees(_ahrs.pitch);
        }

        // Add lead filter.
        const Vector3f &gyro = _ahrs.get_gyro();

        if (_stab_roll && _roll_stb_lead != 0.0f && fabsf(_ahrs.pitch) < M_PI/3.0f) {
            // Compute rate of change of euler roll angle
            float roll_rate = gyro.x + (_ahrs.sin_pitch() / _ahrs.cos_pitch()) * (gyro.y * _ahrs.sin_roll() + gyro.z * _ahrs.cos_roll());
            _roll_angle -= degrees(roll_rate) * _roll_stb_lead;
        }

        if (_stab_tilt && _pitch_stb_lead != 0.0f) {
            // Compute rate of change of euler pitch angle
            float pitch_rate = _ahrs.cos_pitch() * gyro.y - _ahrs.sin_roll() * gyro.z;
            _tilt_angle -= degrees(pitch_rate) * _pitch_stb_lead;
        }
    }
#endif
}
Example no. 15
Surface makeSurfRev(const Curve &profile, unsigned steps)
{
    Surface surface;
    
    if (!checkFlat(profile))
    {
        cerr << "surfRev profile curve must be flat on xy plane." << endl;
        exit(0);
    }
    double angle = 2*PI/steps;
    Matrix3f M =  Matrix3f((float)cos(angle),0,(float)sin(angle),
			   0,1,0,
			   (float)(-sin(angle)),0,(float)cos(angle));
    M.transpose();
    Curve c = profile;
    int numPoints = c.size();
    for(unsigned s=0;s<steps;s++){
      Curve newc;
      for (unsigned i=0;i<numPoints;i++){
	CurvePoint cp  = c[i];
	surface.VV.push_back(cp.V);
	surface.VN.push_back(-cp.N);
	Vector3f newV = M*cp.V;
	Vector3f newN = M*cp.N;
	newN.normalize();
	struct CurvePoint newP = {newV, cp.T,newN,cp.B};
	newc.push_back(newP);
      }
      c = newc;
      newc.clear();
    }

    int another;
    for(unsigned s=0;s<steps;s++){
      if(s==steps-1){
        another=0;
      }else{
	another = s+1;
      }
      for(unsigned i=0;i<numPoints-1;i++){
	surface.VF.push_back(Tup3u(s*numPoints+i,another*numPoints+i,another*numPoints+i+1));
	surface.VF.push_back(Tup3u(s*numPoints+i,another*numPoints+i+1,s*numPoints+i+1));
      }
    }
    return surface;
}
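Up to the transpose (whose effect depends on this Matrix3f's storage convention), M above is a rotation about the y axis by the step angle \theta = 2\pi/\text{steps}:

R_y(\theta) = \begin{pmatrix} \cos\theta & 0 & \sin\theta \\ 0 & 1 & 0 \\ -\sin\theta & 0 & \cos\theta \end{pmatrix},

applied repeatedly to sweep the profile around the axis; rotating the normals with the same matrix is valid here because R_y is orthogonal, so (R_y^{-1})^\top = R_y.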
void PointWithNormalMerger::computeAccumulator() {
  // Compute skin.
  _indexImage.resize(480*_scale, 640*_scale);
  Matrix3f _scaledCameraMatrix = _cameraMatrix;
  _scaledCameraMatrix.block<2, 3>(0, 0) *= _scale;
  _points.toIndexImage(_indexImage, _depthImage, _scaledCameraMatrix, Isometry3f::Identity());
  
  // Compute sigma for each point and create image accumulator.
  PixelMapper pm;
  pm.setCameraMatrix(_scaledCameraMatrix);
  pm.setTransform(Isometry3f::Identity());
  Matrix3f inverseCameraMatrix = _scaledCameraMatrix.inverse();
  float fB = (_baseLine * _scaledCameraMatrix(0, 0)); // kinect baseline * focal length
  CovarianceAccumulator covAcc;
  _covariances.resize(480*_scale, 640*_scale);
  _covariances.fill(covAcc);
  for(size_t i = 0; i < _points.size(); i++ ) {
    PointWithNormal &p = _points[i];
    Vector2i coord = pm.imageCoords(pm.projectPoint(p.point()));
    if(coord[0] < 0 || coord[0] >= _depthImage.cols() ||
       coord[1] < 0 || coord[1] >= _depthImage.rows())
      continue;
    int index = _indexImage(coord[1], coord[0]);
    float skinZ = _depthImage(coord[1], coord[0]);
    Vector3f normal = p.normal();
    Vector3f skinNormal = _points[index].normal();
    float z = p[2];
    if(abs(z - skinZ) > 0.05)
      continue;
    if(acosf(skinNormal.dot(normal)) > M_PI/4.0f)
      continue;
    float zVariation = (_alpha*z*z)/(fB+z*_alpha);
    zVariation *= zVariation;
    Diagonal3f imageCovariance(1.0f, 1.0f, zVariation);
    Matrix3f covarianceJacobian;
    covarianceJacobian <<
      z, 0, (float)coord[0],
      0, z, (float)coord[1],
      0, 0, 1;
    covarianceJacobian = inverseCameraMatrix*covarianceJacobian;
    Matrix3f worldCovariance = covarianceJacobian * imageCovariance * covarianceJacobian.transpose();
    _covariances(coord[1], coord[0])._omegaAcc += worldCovariance.inverse();
    _covariances(coord[1], coord[0])._pointsAcc += worldCovariance.inverse()*p.point();
    _covariances(coord[1], coord[0])._used = true;
  }
}
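The covariance model used above is the usual back-projection Jacobian for a depth sensor; as a sketch, with K the scaled camera matrix, pixel (u, v), depth z and depth variance \sigma_z^2 = \big( \alpha z^2 / (fB + \alpha z) \big)^2,

J = K^{-1} \begin{pmatrix} z & 0 & u \\ 0 & z & v \\ 0 & 0 & 1 \end{pmatrix}, \qquad \Sigma_{\mathrm{world}} = J \operatorname{diag}(1, 1, \sigma_z^2)\, J^\top,

and each accumulator cell stores the information-form sums \Omega \mathrel{+}= \Sigma_{\mathrm{world}}^{-1} and \Omega\mu \mathrel{+}= \Sigma_{\mathrm{world}}^{-1} p for later fusion.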
Example no. 17
// stabilize - stabilizes the mount relative to the Earth's frame
//  input: _angle_ef_target_rad (earth frame targets in radians)
//  output: _angle_bf_output_deg (body frame angles in degrees)
void AP_Mount_Servo::stabilize()
{
    AP_AHRS &ahrs = AP::ahrs();
    // only do the full 3D frame transform if we are doing pan control
    if (_state._stab_pan) {
        Matrix3f m;                         ///< holds 3 x 3 matrix, var is used as temp in calcs
        Matrix3f cam;                       ///< Rotation matrix earth to camera. Desired camera from input.
        Matrix3f gimbal_target;             ///< Rotation matrix from plane to camera. Then Euler angles to the servos.
        m = ahrs.get_rotation_body_to_ned();
        m.transpose();
        cam.from_euler(_angle_ef_target_rad.x, _angle_ef_target_rad.y, _angle_ef_target_rad.z);
        gimbal_target = m * cam;
        gimbal_target.to_euler(&_angle_bf_output_deg.x, &_angle_bf_output_deg.y, &_angle_bf_output_deg.z);
        _angle_bf_output_deg.x  = _state._stab_roll ? degrees(_angle_bf_output_deg.x) : degrees(_angle_ef_target_rad.x);
        _angle_bf_output_deg.y  = _state._stab_tilt ? degrees(_angle_bf_output_deg.y) : degrees(_angle_ef_target_rad.y);
        _angle_bf_output_deg.z = degrees(_angle_bf_output_deg.z);
    } else {
        // otherwise base mount roll and tilt on the ahrs
        // roll/tilt attitude, plus any requested angle
        _angle_bf_output_deg.x = degrees(_angle_ef_target_rad.x);
        _angle_bf_output_deg.y = degrees(_angle_ef_target_rad.y);
        _angle_bf_output_deg.z = degrees(_angle_ef_target_rad.z);
        if (_state._stab_roll) {
            _angle_bf_output_deg.x -= degrees(ahrs.roll);
        }
        if (_state._stab_tilt) {
            _angle_bf_output_deg.y -= degrees(ahrs.pitch);
        }

        // lead filter
        const Vector3f &gyro = ahrs.get_gyro();

        if (_state._stab_roll && !is_zero(_state._roll_stb_lead) && fabsf(ahrs.pitch) < M_PI/3.0f) {
            // Compute rate of change of euler roll angle
            float roll_rate = gyro.x + (ahrs.sin_pitch() / ahrs.cos_pitch()) * (gyro.y * ahrs.sin_roll() + gyro.z * ahrs.cos_roll());
            _angle_bf_output_deg.x -= degrees(roll_rate) * _state._roll_stb_lead;
        }

        if (_state._stab_tilt && !is_zero(_state._pitch_stb_lead)) {
            // Compute rate of change of euler pitch angle
            float pitch_rate = ahrs.cos_pitch() * gyro.y - ahrs.sin_roll() * gyro.z;
            _angle_bf_output_deg.y -= degrees(pitch_rate) * _state._pitch_stb_lead;
        }
    }
}
Example no. 18
void transformDistributions(
    Vector3fVector& means,
    Matrix3fVector& covariances,
    const AffineTransform& transform)
{
    Matrix3f R = transform.rotation();
    Vector3f t = transform.translation();
    Matrix3f R_T = R.transpose();

    unsigned int size = means.size();
    for(unsigned int i = 0; i < size; ++i)
    {
        Vector3f& m = means[i];
        Matrix3f& c = covariances[i];
        m = R * m + t;
        c = R * c * R_T;
    }
}
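This is the push-forward of a Gaussian through an affine transform, applied to each (mean, covariance) pair:

x \sim \mathcal{N}(m, C) \;\Rightarrow\; R x + t \sim \mathcal{N}\big(R m + t,\; R C R^\top\big).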
Example no. 19
// --------- deprecated --------------
void OptSO3::rectifyRotation(Matrix3f& R)
{
  float detR = R.determinant();
  if (abs(detR-1.0) <1e-6) return;
  // use projection of R onto SO3 to rectify the rotation matrix

  //cout<<"det(R)="<<R.determinant()<<endl<<R<<endl;

  Matrix3f M = R.transpose()*R;
  EigenSolver<Matrix3f> eig(M);
  Matrix3cf U = eig.eigenvectors();
  Vector3cf d = eig.eigenvalues();
  if (d(2).real() > 1e-6)
  {
    // http://lcvmwww.epfl.ch/new/publications/data/articles/63/simaxpaper.pdf
    // Eq. (3.7)
    // Moakher M (2002). "Means and averaging in the group of rotations."
    d = ((d.array().sqrt()).array().inverse());
    Matrix3cf D = d.asDiagonal();
    D(2,2) *= detR>0.0?1.0f:-1.0f;
    R = R*(U*D*U.transpose()).real();
  }else{
    //http://www.ti.inf.ethz.ch/ew/courses/GCMB07/material/lecture03/HornOrthonormal.pdf
    //Horn: Closed-Form Solution of Absolute Orientation Using Orthonormal Matrices
    d = ((d.array().sqrt()).array().inverse());
    d(2) = 0.0f;
    Matrix3cf Sp = d.asDiagonal(); 
    JacobiSVD<Matrix3f> svd(R*Sp.real());
    R = R*Sp.real() + (detR>0.0?1.0f:-1.0f)*svd.matrixU().col(2)*svd.matrixV().col(2).transpose();
  }
  //    Matrix3d M = (R.transpose()*R).cast<double>();
  //    EigenSolver<Matrix3d> eig(M);
  //    MatrixXd U = eig.eigenvectors();
  //    Matrix3d D = eig.eigenvalues();

  //cout<<"det(R)="<<R.determinant()<<endl<<R<<endl;

  //      R.col(0).normalize();
  //      R.col(2) = R.col(0).cross(R.col(1));
  //      R.col(2).normalize();
  //      R.col(1) = R.col(2).cross(R.col(0));
  //cout<<"det(R)="<<R.determinant()<<endl<<R<<endl;
} 
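Both branches above project R back onto the rotation group via its polar decomposition; a sketch of the regular (full-rank) case, using the eigendecomposition R^\top R = U D U^\top computed above:

R_{\mathrm{proj}} = R\, \big(R^\top R\big)^{-1/2} = R\, U D^{-1/2} U^\top,

with the sign of the last factor flipped when \det R < 0 so the result stays a proper rotation; the second branch handles the rank-deficient case following Horn's orthonormal-matrices construction.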
Example no. 20
File: main.cpp Project: juhl/Strang
void TestEigenSolver() {
	Matrix3f A = Matrix3f::Random(3, 3);
	A = A.transpose() * A; // make A symmetric positive (semi-)definite
	Vector3f b = Vector3f::Random(3);
	Vector3f x;

	std::cout << "=====================" << std::endl;
	std::cout << "Testing Eigen solvers" << std::endl;
	std::cout << "=====================" << std::endl;

	// A must be (real) symmetric 
	SelfAdjointEigenSolver<Matrix3f> selfAdjointEigenSolver(A);
	std::cout << "Eigenvalues = " << selfAdjointEigenSolver.eigenvalues().transpose() << std::endl;
	std::cout << "Eigenvectors = " << selfAdjointEigenSolver.eigenvectors() << std::endl;

	// NOTE: sort by yourself
	EigenSolver<Matrix3f> eigenSolver(A);
	std::cout << "Eigenvalues = " << eigenSolver.eigenvalues().transpose() << std::endl;
	std::cout << "Eigenvectors = " << eigenSolver.eigenvectors() << std::endl;
}
void visualizerShowCamera(const Matrix3f& R, const Vector3f& _t, float r, float g, float b, double s = 0.01 /*downscale factor*/, const std::string& name = "") {
	std::string name_ = name,line_name = name + "line";
	if (name.length() <= 0) {
		stringstream ss; ss<<"camera"<<iCamCounter++;
		name_ = ss.str();
		ss << "line";
		line_name = ss.str();
	}
	
	Vector3f t = -R.transpose() * _t;

	Vector3f vright = R.row(0).normalized() * s;
	Vector3f vup = -R.row(1).normalized() * s;
	Vector3f vforward = R.row(2).normalized() * s;

	Vector3f rgb(r,g,b);

	pcl::PointCloud<pcl::PointXYZRGB> mesh_cld;
	mesh_cld.push_back(Eigen2PointXYZRGB(t,rgb));
	mesh_cld.push_back(Eigen2PointXYZRGB(t + vforward + vright/2.0 + vup/2.0,rgb));
	mesh_cld.push_back(Eigen2PointXYZRGB(t + vforward + vright/2.0 - vup/2.0,rgb));
	mesh_cld.push_back(Eigen2PointXYZRGB(t + vforward - vright/2.0 + vup/2.0,rgb));
	mesh_cld.push_back(Eigen2PointXYZRGB(t + vforward - vright/2.0 - vup/2.0,rgb));

	//TODO Mutex acquire
	pcl::PolygonMesh pm;
	pm.polygons.resize(6); 
	for(int i=0;i<6;i++)
		for(int _v=0;_v<3;_v++)
			pm.polygons[i].vertices.push_back(ipolygon[i*3 + _v]);
	pcl::toROSMsg(mesh_cld,pm.cloud);
	bShowCam = true;
	cam_meshes.push_back(std::make_pair(name_,pm));
	//TODO mutex release

	linesToShow.push_back(std::make_pair(line_name,
		AsVector(Eigen2Eigen(t,rgb),Eigen2Eigen(t + vforward*3.0,rgb))
		));
}
Example no. 22
Sphere Sphere::fit_sphere(const MatrixX3f& points)
{
    const VectorXf& x = points.col(0);
    const VectorXf& y = points.col(1);
    const VectorXf& z = points.col(2);

    VectorXf point_means = points.colwise().mean();

    VectorXf x_mean_free = x.array() - point_means(0);
    VectorXf y_mean_free = y.array() - point_means(1);
    VectorXf z_mean_free = z.array() - point_means(2);

    Matrix3f A;
    A << (x.cwiseProduct(x_mean_free)).mean(), 2*(x.cwiseProduct(y_mean_free)).mean(), 2*(x.cwiseProduct(z_mean_free)).mean(),
                                            0,   (y.cwiseProduct(y_mean_free)).mean(), 2*(y.cwiseProduct(z_mean_free)).mean(),
                                            0,                                      0,   (z.cwiseProduct(z_mean_free)).mean();

    Matrix3f A_T = A.transpose();
    A += A_T;

    Vector3f b;
    VectorXf sq_sum = x.array().pow(2)+y.array().pow(2)+z.array().pow(2);
    b << (sq_sum.cwiseProduct(x_mean_free)).mean(),
         (sq_sum.cwiseProduct(y_mean_free)).mean(),
         (sq_sum.cwiseProduct(z_mean_free)).mean();

    Vector3f center = A.ldlt().solve(b);

    MatrixX3f tmp(points.rows(),3);
    tmp.col(0) = x.array() - center(0);
    tmp.col(1) = y.array() - center(1);
    tmp.col(2) = z.array() - center(2);

    float r = sqrt(tmp.array().pow(2).rowwise().sum().mean());

    return Sphere(center, r);
}
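fit_sphere is the standard algebraic least-squares sphere fit: working in mean-free coordinates, the center c solves a 3x3 linear system and the radius is the RMS distance of the points to that center, schematically

A\,c = b, \qquad r = \sqrt{ \frac{1}{n} \sum_i \lVert p_i - c \rVert^2 },

with A and b the moment matrices assembled above (A symmetrized with its transpose before the LDLT solve).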
Example no. 23
File: main.cpp Project: juhl/Strang
void TestLinearSystemSolver() {
	Matrix3f A = Matrix3f::Random(3, 3);
	A = A.transpose() * A; // make A symmetric positive (semi-)definite
	Vector3f b = Vector3f::Random(3);
	Vector3f x;
	float relativeError; // used to check that a solution really exists

	std::cout << "=============================" << std::endl;
	std::cout << "Testing linear system solvers" << std::endl;
	std::cout << "=============================" << std::endl;

	// A must be invertible
	PartialPivLU<Matrix3f> partialPivLU(A);
	x = partialPivLU.solve(b);
	relativeError = (A * x - b).norm() / b.norm();
	if (relativeError < 0.00001) {
		std::cout << "Solution using partial pivoting LU = " << std::endl << x.transpose() << std::endl;
	}

	FullPivLU<Matrix3f> fullPivLU(A);
	x = fullPivLU.solve(b);
	relativeError = (A * x - b).norm() / b.norm();
	if (relativeError < 0.00001) {
		std::cout << "Solution using full pivoting LU = " << std::endl << x.transpose() << std::endl;
	}

	HouseholderQR<Matrix3f> householderQR(A);
	x = householderQR.solve(b);
	relativeError = (A * x - b).norm() / b.norm();
	if (relativeError < 0.00001) {
		std::cout << "Solution using Householder QR = " << std::endl << x.transpose() << std::endl;
	}

	ColPivHouseholderQR<Matrix3f> colPivHouseholderQR(A);
	x = colPivHouseholderQR.solve(b);
	relativeError = (A * x - b).norm() / b.norm();
	if (relativeError < 0.00001) {
		std::cout << "Solution using column pivoting Householder QR = " << std::endl << x.transpose() << std::endl;
	}

	FullPivHouseholderQR<Matrix3f> fullPivHouseholderQR(A);
	x = fullPivHouseholderQR.solve(b);
	relativeError = (A * x - b).norm() / b.norm();
	if (relativeError < 0.00001) {
		std::cout << "Solution using full pivoting Householder QR = " << std::endl << x.transpose() << std::endl;
	}

	// A must be positive definite
	LLT<Matrix3f> llt(A);
	x = llt.solve(b);
	relativeError = (A * x - b).norm() / b.norm();
	if (relativeError < 0.00001) {
		std::cout << "Solution using Cholesky decomposition = " << std::endl << x.transpose() << std::endl;
	}

	// A must not be indefinite
	LDLT<Matrix3f> ldlt(A);
	x = ldlt.solve(b);
	relativeError = (A * x - b).norm() / b.norm();
	if (relativeError < 0.00001) {
		std::cout << "Solution using LDLT decomposition = " << std::endl << x.transpose() << std::endl;
	}
}
Example no. 24
// Get traversability of a pose
bool TensorMap::isTraversable(const geometry_msgs::Pose& pose, bool v) {
    traversability_timer.start();
    const Vector3f position(poseToVector(pose));
    const CellKey& key(positionToIndex(position));
    const TensorCell& cell((*this)[key]);

    // check it is easily traversable
    if (isEasilyTraversable(key)) {
        traversability_timer.stop();
        return true;
    }
    // if not, check in details

    // getting parameters
    const TraversabilityParams& params = tensor_map_params->traversability;
    const float min_stick_sal = params.min_saliency;
    const float max_orientation_angle = params.max_slope;
    const int stick_sal_threshold = params.max_points_in_free_cell;
    const float min_free_cell_ratio = params.min_free_cell_ratio;
    const int nb_max_points = params.max_points_in_bounding_box;
    const int nb_min_support = params.min_support_points;
    const float length = params.length;
    const float width = params.width;
    const float height = params.height;
    const float l_2 = length/2. + params.inflation;
    const float w_2 = width/2. + params.inflation;
    const float ground_buffer = params.ground_buffer;
    const float buf2 = ground_buffer/2;
    const float h_2 = height/2;
    const float h0_2 = (height + buf2)/2;
    const float h1_2 = (height - buf2)/2;

    // check for enough saliency
    if (cell.stick_sal<min_stick_sal) {
        if (v) {
            cout << "Saliency too low: "<<cell.stick_sal<<"/"<<
                 min_stick_sal<<endl;
        }
        traversability_timer.stop();
        return false;
    }
    // check orientation is correct
    if (cell.angle_to_vertical>max_orientation_angle) {
        if (v) {
            cout << "Angle too steep: "<<cell.angle_to_vertical<<"/"<<
                 max_orientation_angle<<endl;
        }
        traversability_timer.stop();
        return false;
    }
    /*
     * check absence of obstacle
     */
    // align orientation to surface
    Matrix3f alignedAxes;
    alignQuaternion(pose.orientation, cell.normal, alignedAxes);
    Vector3f x = alignedAxes.col(0);
    Vector3f y = alignedAxes.col(1);
    Vector3f z = alignedAxes.col(2);
    // generate bounding box
    Vector3f max_corner = l_2*x.array().abs()+\
                          w_2*y.array().abs()+\
                          h0_2*z.array().abs();
    Vector3f center = position + h_2*z;
    CellKey min_key = positionToIndex(center - max_corner);
    CellKey max_key = positionToIndex(center + max_corner);
    // shifted up to separate support layer FIXME
    center += (buf2/2)*z;
    // look through bounding box for possible obstacles
    float nb_robot_cells = 0.;
    float nb_ok_robot_cells = 0.;
    int nb_points_in_cells = 0;
    int nb_support_points = 0;
    for (int i=min_key.i; i<max_key.i+1; ++i)
        for (int j=min_key.j; j<max_key.j+1; ++j)
            for (int k=min_key.k; k<max_key.k+1; ++k) {
                // check cell is in the robot
                TensorCell c = (*this)[CellKey(i, j, k)];
                Vector3f rel_pos = c.position - center;
                Vector3f rel_pos_aligned = alignedAxes.transpose()*rel_pos;
                float rel_x, rel_y, rel_z;
                rel_x = rel_pos_aligned(0);
                rel_y = rel_pos_aligned(1);
                rel_z = rel_pos_aligned(2);
                if ((fabs(rel_x)>l_2)||(fabs(rel_y)>w_2)) {
                    continue;
                }
                if (fabs(rel_z)>h1_2) {
                    if (fabs(rel_z+h_2)<buf2) {
                        nb_support_points += c.nb_points;
                    }
                } else {
                    // check no obstacle
                    nb_robot_cells += 1;
                    if (c.nb_points<=stick_sal_threshold) {
                        nb_ok_robot_cells += 1;
                    } else if (c.angle_to_vertical <= max_orientation_angle) {
                        nb_ok_robot_cells += 1;
                        nb_points_in_cells += c.nb_points;
                    }
                }
            }
    // exit if not enough free cells
    if ((nb_ok_robot_cells/nb_robot_cells)<min_free_cell_ratio)
    {
        if (v) {
            cout << "not enough free cells: "<<
                 nb_ok_robot_cells<<"/"<<
                 min_free_cell_ratio*nb_robot_cells<<" ("<<
                 nb_robot_cells<<")"<<endl;
        }
        traversability_timer.stop();
        return false;
    }
    if (nb_points_in_cells>nb_max_points) {
        if (v) {
            cout << "too many (ground) points in cells: "<<
                 nb_points_in_cells<<"/"<<nb_max_points<<endl;
        }
        traversability_timer.stop();
        return false;
    }
    if (nb_support_points<nb_min_support) {
        if (v) {
            cout << "too few support points in cells: "<<
                 nb_support_points<<"/"<<nb_min_support<<endl;
        }
        traversability_timer.stop();
        return false;
    }
    // cell seems valid
    if (v) {
        cout << "valid" << endl;
    }
    traversability_timer.stop();
    return true;

}
void CalibrationNode::performEstimation(){
	if(rotationRB_vec.size() < 5 ){
		std::cout << "Insufficient data" << std::endl;
		return;
	}

	//perform least squares estimation
	Matrix3f M;
	Matrix4f rbi, rbj, cbi, cbj;
	Matrix4f A, B;

	Matrix3f I;
	I.setIdentity();

	MatrixXf C(0,3), bA(0,1), bB(0,1);

	Vector3f ai, bi;

	VectorXf V_tmp;
	MatrixXf C_tmp;

	M.setZero();

	for(int i=0; i < (int)rotationRB_vec.size(); i++){
		for(int j=0; j < (int)rotationRB_vec.size(); j++){
			if(i!=j){
				rbi << rotationRB_vec[i] , translationRB_vec[i] ,  0, 0, 0, 1;
				rbj << rotationRB_vec[j] , translationRB_vec[j] ,  0, 0, 0, 1;
				A = rbj.inverse()*rbi;

				cbi << rotationCB_vec[i] , translationCB_vec[i] ,  0, 0, 0, 1;
				cbj << rotationCB_vec[j] , translationCB_vec[j] ,  0, 0, 0, 1;
				B = cbj*cbi.inverse();

				ai = getLogTheta(A.block(0,0,3,3));
				bi = getLogTheta(B.block(0,0,3,3));

				M += bi*ai.transpose();

				MatrixXf C_tmp = C;
				C.resize(C.rows()+3, NoChange);
				C << C_tmp,  Matrix3f::Identity() - A.block(0,0,3,3);

				V_tmp = bA;
				bA.resize(bA.rows()+3, NoChange);
				bA << V_tmp, A.block(0,3,3,1);

				V_tmp = bB;
				bB.resize(bB.rows()+3, NoChange);
				bB << V_tmp, B.block(0,3,3,1);

			}//end of if i!=j
		}
	}//end of for(.. i < rotationRB_vec.size(); ..)

#if ESTIMATION_DEBUG
	std::cout << "M = [ " << M << " ]; " << endl;
#endif

	EigenSolver<Matrix3f> es(M.transpose()*M);
	Matrix3cf D = es.eigenvalues().asDiagonal();
	Matrix3cf V = es.eigenvectors();

	Matrix3cf Lambda = D.inverse().array().sqrt();
	Matrix3cf Theta_X = V * Lambda * V.inverse() * M.transpose();
	std::cout << "Orientation of Camera Frame with respect to Robot tool-tip frame." << std::endl;
	std::cout << "Theta_X = [ " << Theta_X.real()  << " ]; " << endl;

	//Estimating translational offset
	for(int i=0; i < bB.rows()/3; i++){
		bB.block(i*3,0,3,1) = Theta_X.real()*bB.block(i*3,0,3,1);
	}
	bA = bA - bB; // this is d. saving memory

	std::cout << "Translation of Camera Frame with respect to Robot tool-tip frame." << std::endl;
	cout << "bX = [ " << (C.transpose()*C).inverse() * C.transpose() * bA << " ]; " << endl;

}
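performEstimation is a least-squares AX = XB hand-eye calibration, essentially in the Park-Martin form; a sketch of the two stages the double loop and the final solves implement:

R_X = \big( M^\top M \big)^{-1/2} M^\top, \qquad M = \sum_{i \ne j} \beta_{ij}\, \alpha_{ij}^\top, \quad \alpha_{ij} = \log R_{A_{ij}},\ \beta_{ij} = \log R_{B_{ij}},

followed by the stacked translation least squares (I - R_A)\, t_X \approx t_A - R_X\, t_B, solved through the normal equations (C^\top C)^{-1} C^\top d in the last line.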
Example no. 26
// Get faster traversability
bool TensorMap::isEasilyTraversable(const CellKey& key) {
    TensorCell& cell((*this)[key]);
    // check cache
    if (cell.traversability!=UNKNOWN) {
        return (cell.traversability==OK);
    }
    // compute it
    const bool v = false;

    // getting parameters
    const TraversabilityParams& params = tensor_map_params->traversability;
    const float min_stick_sal = params.min_saliency;
    const float max_orientation_angle = params.max_slope;
    const int stick_sal_threshold = params.max_points_in_free_cell;
    const float min_free_cell_ratio = params.min_free_cell_ratio;
    const int nb_max_points = params.max_points_in_bounding_box;
    const int nb_min_support = params.min_support_points;
    const float diameter = params.diameter;
    const float d_2 = diameter/2. + params.inflation;
    const float r2 = d_2*d_2;
    const float height = params.height;
    const float ground_buffer = params.ground_buffer;
    const float buf2 = ground_buffer/2;
    const float h_2 = height/2;
    const float h0_2 = (height + buf2)/2;
    const float h1_2 = (height - buf2)/2;

    // check for enough saliency
    if (cell.stick_sal<min_stick_sal) {
        if (v) {
            cout << "Saliency too low: "<<cell.stick_sal<<"/"<<
                 min_stick_sal<<endl;
        }
        cell.traversability = NOT_SALIENCY;
        return false;
    }
    // check orientation is correct
    if (cell.angle_to_vertical>max_orientation_angle) {
        if (v) {
            cout << "Angle too steep: "<<cell.angle_to_vertical<<"/"<<
                 max_orientation_angle<<endl;
        }
        cell.traversability = NOT_ANGLE;
        return false;
    }
    /*
     * check absence of obstacle
     */
    // align orientation to surface
    Matrix3f alignedAxes;
    geometry_msgs::Quaternion quat;
    quat.x = quat.y = quat.z = 0;
    quat.w = 1;
    alignQuaternion(quat, cell.normal, alignedAxes);
    Vector3f x = alignedAxes.col(0);
    Vector3f y = alignedAxes.col(1);
    Vector3f z = alignedAxes.col(2);
    // generate bounding box
    Vector3f max_corner = d_2*x.array().abs()+\
                          d_2*y.array().abs()+\
                          h0_2*z.array().abs();
    Vector3f center = cell.position + h_2*z;
    CellKey min_key = positionToIndex(center - max_corner);
    CellKey max_key = positionToIndex(center + max_corner);
    // shifted up to separate support layer FIXME
    center += buf2/2*z;
    // look through bounding box for possible obstacles
    float nb_robot_cells = 0.;
    float nb_ok_robot_cells = 0.;
    int nb_points_in_cells = 0;
    int nb_support_points = 0;
    for (int i=min_key.i; i<max_key.i+1; ++i)
        for (int j=min_key.j; j<max_key.j+1; ++j)
            for (int k=min_key.k; k<max_key.k+1; ++k) {
                // check cell is in the robot
                TensorCell c = (*this)[CellKey(i, j, k)];
                Vector3f rel_pos = c.position - center;
                Vector3f rel_pos_aligned = alignedAxes.transpose()*rel_pos;
                float rel_x, rel_y, rel_z;
                rel_x = rel_pos_aligned(0);
                rel_y = rel_pos_aligned(1);
                rel_z = rel_pos_aligned(2);
                if (pow(rel_x, 2)+pow(rel_y, 2)>r2) {
                    continue;
                }
                if (fabs(rel_z)>h1_2) {
                    if (fabs(rel_z+h_2)<buf2) {
                        nb_support_points += c.nb_points;
                    }
                } else {
                    // check no obstacle
                    nb_robot_cells += 1;
                    if (c.nb_points<=stick_sal_threshold) {
                        nb_ok_robot_cells += 1;
                    } else if (c.angle_to_vertical <= max_orientation_angle) {
                        nb_ok_robot_cells += 1;
                        nb_points_in_cells += c.nb_points;
                    }
                }
            }
    // exit if not enough free cells
    if ((nb_ok_robot_cells/nb_robot_cells)<min_free_cell_ratio)
    {
        if (v) {
            cout << "not enough free cells: "<<
                 nb_ok_robot_cells<<"/"<<
                 min_free_cell_ratio*nb_robot_cells<<" ("<<
                 nb_robot_cells<<")"<<endl;
        }
        cell.traversability = NOT_OBSTACLE;
        return false;
    }
    if (nb_points_in_cells>nb_max_points) {
        if (v) {
            cout << "too many (ground) points in cells: "<<
                 nb_points_in_cells<<"/"<<nb_max_points<<endl;
        }
        cell.traversability = NOT_IN_GROUND;
        return false;
    }
    if (nb_support_points<nb_min_support) {
        if (v) {
            cout << "too few support points in cells: "<<
                 nb_support_points<<"/"<<nb_min_support<<endl;
        }
        cell.traversability = NOT_IN_AIR;
        return false;
    }
    // cell seems valid
    if (v) {
        cout << "valid" << endl;
    }
    cell.traversability = OK;
    return true;
}