int main(int, char**)
{
  cout.precision(3);
  Matrix3f m;
  m.row(0) << 1, 2, 3;
  m.block(1,0,2,2) << 4, 5, 7, 8;
  m.col(2).tail(2) << 6, 9;
  std::cout << m;

  return 0;
}
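For reference, the three comma-initializer statements above fill the top row, the bottom-left 2x2 block, and the last two entries of the third column, so (assuming Eigen's default ostream formatting) the program prints:

1 2 3
4 5 6
7 8 9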
// setup _Bearth
void Compass::_setup_earth_field(void)
{
    // assume an earth field strength of 400
    _hil.Bearth(400, 0, 0);
    
    // rotate _Bearth for inclination and declination. -66 degrees
    // is the inclination in Canberra, Australia
    Matrix3f R;
    R.from_euler(0, ToRad(66), get_declination());
    _hil.Bearth = R * _hil.Bearth;
}
Example #3
void Camera::SetView(const Vector3f &position, const Vector3f &target,
                     const Vector3f &up) {
  Matrix3f rotation;
  rotation.row(1) = up.normalized();
  rotation.row(2) = (position - target).normalized();
  rotation.row(0) = rotation.row(1).cross(rotation.row(2));
  view_.topLeftCorner<3, 3>() = rotation;
  view_.topRightCorner<3, 1>() = -rotation * position;
  view_(3, 3) = 1.0;
  matrix_dirty_ = true;
}
int main(int, char**)
{
  cout.precision(3);
  Matrix3f m;
  m = AngleAxisf(0.25*M_PI, Vector3f::UnitX())
    * AngleAxisf(0.5*M_PI,  Vector3f::UnitY())
    * AngleAxisf(0.33*M_PI, Vector3f::UnitZ());
  cout << m << endl << "is unitary: " << m.isUnitary() << endl;

  return 0;
}
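Because a product of AngleAxisf rotations is itself a rotation matrix, m is orthonormal, and the isUnitary() check above should print 1 (true) within Eigen's default floating-point tolerance.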
int main()
{
   Matrix3f A;
   Vector3f b;
   A << 1,2,3,  4,5,6,  7,8,10;
   b << 3, 3, 4;
   cout << "Here is the matrix A:\n" << A << endl;
   cout << "Here is the vector b:\n" << b << endl;
   Vector3f x = A.colPivHouseholderQr().solve(b);
   cout << "The solution is:\n" << x << endl;
}
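For this particular A and b the exact solution is x = (-2, 1, 1), which is what the program prints up to floating-point rounding.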
Example #6
void RealtimeMF_openni::normals_cb(float* d_normals, uint8_t* haveData,
    uint32_t w, uint32_t h)
{
  tLog_.tic(-1); // reset all timers
  int32_t nComp = 0;
  float* d_nComp = this->normalExtract->d_normalsComp(nComp);
  Matrix3f kRwBefore = kRw_;
  tLog_.toctic(0,1);

  optSO3_->updateExternalGpuNormals(d_nComp,nComp,3,0);
  double residual = optSO3_->conjugateGradientCUDA(kRw_,nCGIter_);
  double D_KL = optSO3_->D_KL_axisUnif();
  tLog_.toctic(1,2);

  {
    boost::mutex::scoped_lock updateLock(this->updateModelMutex);
    this->normalsImg_ = this->normalExtract->normalsImg();
    if(z_.rows() != w*h) z_.resize(w*h);
    this->normalExtract->uncompressCpu(optSO3_->z().data(), optSO3_->z().rows(),
        z_.data(), z_.rows());

    mfAxes_ = MatrixXf::Zero(3,6);
    for(uint32_t k=0; k<6; ++k){
      int j = k/2; // which of the rotation columns does this belong to
      float sign = (- float(k%2) +0.5f)*2.0f; // sign of the axis
      mfAxes_.col(k) = sign*kRw_.col(j);
    }
    D_KL_= D_KL;
    residual_ = residual;
    this->update_ = true;
    updateLock.unlock();
  }

  tLog_.toc(2); // total time
  tLog_.logCycle();
  cout<<"delta rotation kRw_ = \n"<<kRwBefore*kRw_.transpose()<<endl;
  cout<<"---------------------------------------------------------------------------"<<endl;
  tLog_.printStats();
  cout<<" residual="<<residual_<<"\t D_KL="<<D_KL_<<endl;
  cout<<"---------------------------------------------------------------------------"<<endl;
  fout_<<D_KL_<<" "<<residual_<<endl; fout_.flush();

////  return kRw_;
//  {
//    boost::mutex::scoped_lock updateLock(this->updateModelMutex);
////    pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr nDispPtr =
////      normalExtract->normalsPc();
////    nDisp_ = pcl::PointCloud<pcl::PointXYZRGB>::Ptr( new
////        pcl::PointCloud<pcl::PointXYZRGB>(*nDispPtr));
////    this->normalsImg_ = this->normalExtract->normalsImg();
//    this->update_ = true;
//  }
}
Example #7
// static
SimilarityTransform SimilarityTransform::fromMatrix( const Matrix4f& m )
{
    Matrix3f r = m.getSubmatrix3x3();
    float s = r.getRow( 0 ).norm();

    return
    {
        s,
        r / s,
        m.getCol( 3 ).xyz
    };
}
Example #8
Eigen::Matrix4f ConsistencyTest::initPose2D( std::map<unsigned, unsigned> &matched_planes )
{
  //Calculate rotation
  Matrix3f normalCovariances = Matrix3f::Zero();
  for(map<unsigned, unsigned>::iterator it = matched_planes.begin(); it != matched_planes.end(); it++)
    normalCovariances += PBMTarget.vPlanes[it->second].v3normal * PBMSource.vPlanes[it->first].v3normal.transpose();
  normalCovariances(1,1) += 100; // Rotation "restricted" to the y axis

  JacobiSVD<MatrixXf> svd(normalCovariances, ComputeThinU | ComputeThinV);
  Matrix3f Rotation = svd.matrixU() * svd.matrixV().transpose();

  if(Rotation.determinant() < 0)
//    Rotation.row(2) *= -1;
    Rotation = -Rotation;

  // Calculate translation
  Vector3f translation;
  Vector3f center_data = Vector3f::Zero(), center_model = Vector3f::Zero();
  Vector3f centerFull_data = Vector3f::Zero(), centerFull_model = Vector3f::Zero();
  unsigned numFull = 0, numNonStruct = 0;
  for(map<unsigned, unsigned>::iterator it = matched_planes.begin(); it != matched_planes.end(); it++)
  {
    if(PBMSource.vPlanes[it->first].bFromStructure) // The certainty in center of structural planes is too low
      continue;

    ++numNonStruct;
    center_data += PBMSource.vPlanes[it->first].v3center;
    center_model += PBMTarget.vPlanes[it->second].v3center;
    if(PBMSource.vPlanes[it->first].bFullExtent)
    {
      centerFull_data += PBMSource.vPlanes[it->first].v3center;
      centerFull_model += PBMTarget.vPlanes[it->second].v3center;
      ++numFull;
    }
  }
  if(numFull > 0)
  {
    translation = (-centerFull_model + Rotation * centerFull_data) / numFull;
  }
  else
  {
    translation = (-center_model + Rotation * center_data) / numNonStruct;
  }

  translation[1] = 0; // restrict: no translation along the y axis

  // Form SE3 transformation matrix. This matrix maps the model into the current data reference frame
  Eigen::Matrix4f rigidTransf;
  rigidTransf.block(0,0,3,3) = Rotation;
  rigidTransf.block(0,3,3,1) = translation;
  rigidTransf.row(3) << 0,0,0,1;
  return rigidTransf;
}
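The rotation above is the standard orthogonal-Procrustes (Kabsch) solution. With the normal cross-covariance

N = \sum_i n^{target}_i \, (n^{source}_i)^{\top}, \qquad N = U \Sigma V^{\top},

the best-fit rotation is R = U V^{\top}; when \det(R) < 0 the result is a reflection, and the code flips the sign to get back into SO(3). The +100 added to N(1,1) biases the fit toward a rotation about the y axis, as the comment says.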
int main(int, char**)
{
  cout.precision(3);
  Matrix3f A;
  Vector3f b;
  A << 1,2,3,  4,5,6,  7,8,10;
  b << 3, 3, 4;
  Vector3f x = A.inverse() * b;
  cout << "The solution is:" << endl << x << endl;

  return 0;
}
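This is the same 3x3 system as in the colPivHouseholderQr() example above, so the printed solution is again (-2, 1, 1). For a tiny fixed-size matrix such as Matrix3f, forming the inverse explicitly is acceptable; for larger or ill-conditioned systems a decomposition-based solve() is generally the more accurate choice.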
Example #10
void ExtendedWmlCamera::OrbitVertical( float fAngle )
{
	Matrix3f rotate;
	rotate.FromAxisAngle( GetLeft(), fAngle );
	
	Vector3f direction( GetLocation() - m_ptTarget );
	Vector3f newUp( rotate * GetUp() );
	newUp.Normalize();

	SetTargetFrame( m_ptTarget + (rotate * direction), GetLeft(), 
		newUp, m_ptTarget );
}
Example #11
/* evaluate cost function for a given assignment of normals to axes */
float mmf::OptSO3vMFCF::evalCostFunction(Matrix3f& R)
{
  float c = 0.0f;
  for (uint32_t j=0; j<6; ++j) { 
    if(j%2 ==0){
      c -= this->cld_.xSums().col(j).transpose()*R.col(j/2);
    }else{
      c += this->cld_.xSums().col(j).transpose()*R.col(j/2);
    }
  }
  return c;
}
Example #12
//--------------------------------------------------------------------------------
void App::Update()
{
	// Update the timer to determine the elapsed time since the last frame.  This can
	// then be used for animation during the frame.

	m_pTimer->Update();

	// Send an event to everyone that a new frame has started.  This will be used
	// in later examples for using the material system with render views.

	EvtManager.ProcessEvent( EvtFrameStartPtr( new EvtFrameStart( m_pTimer->Elapsed() ) ) );


	// Manipulate the scene here - simply rotate the root of the scene in this
	// example.

	Matrix3f rotation;
	rotation.RotationY( m_pTimer->Elapsed() );
	m_pActor->GetNode()->Transform.Rotation() *= rotation;


	// Update the scene, and then render all cameras within the scene.

	m_pScene->Update( m_pTimer->Elapsed() );
	m_pScene->Render( m_pRenderer11 );


	// Perform the rendering and presentation for each window.

	for ( int i = 0; i < NUM_WINDOWS; i++ )
	{
		// Bind the swap chain render target and the depth buffer for use in 
		// rendering.  

		D3D11_BOX box;
		box.left = m_pWindow[i]->GetLeft();
		box.right = m_pWindow[i]->GetWidth() + box.left;
		box.top = m_pWindow[i]->GetTop();
		box.bottom = m_pWindow[i]->GetHeight() + box.top;
		box.front = 0;
		box.back = 1;

		if ( box.left < 0 ) box.left = 0;
		if ( box.right > (unsigned int)m_DesktopRes.x - 1 ) box.right = (unsigned int)m_DesktopRes.x - 1;
		if ( box.top < 0 ) box.top = 0;
		if ( box.bottom > (unsigned int)m_DesktopRes.y - 1 ) box.bottom = (unsigned int)m_DesktopRes.y - 1;

		m_pRenderer11->pImmPipeline->CopySubresourceRegion( m_RenderTarget[i], 0, 0, 0, 0, m_OffscreenTexture, 0, &box );
		m_pRenderer11->Present( m_pWindow[i]->GetHandle(), m_pWindow[i]->GetSwapChain() );
	}

}
Example #13
void ExtendedWmlCamera::RotateLateral( float fAngle )
{
	Matrix3f rotate;
	rotate.FromAxisAngle( GetUp(), fAngle );

	Vector3f direction( m_ptTarget - GetLocation () );

	Vector3f newLeft( rotate * GetLeft() );
	newLeft.Normalize();

	SetTargetFrame( GetLocation(), newLeft, GetUp(), 
		GetLocation() + ( rotate * direction ) );
}
Example #14
void FPSControls::setUpVector( const Vector3f& y )
{
    Matrix3f b = GeometryUtils::getRightHandedBasis( y );
    m_groundPlaneToWorld.setCol( 0, b.getCol( 1 ) );
    m_groundPlaneToWorld.setCol( 1, b.getCol( 2 ) );
    m_groundPlaneToWorld.setCol( 2, b.getCol( 0 ) );

    m_worldToGroundPlane = m_groundPlaneToWorld.inverse();

    // TODO: snap camera to face up when you change the up vector to something
    //   new? rotate along current lookat direction?
    // TODO: reset camera
}
MovableTransProperty::MovableTransProperty (PropertyPage *parent, 
	const std::string &name, const std::string &tag,
	Transform *trans, Object *obj)
	:
Property(parent, name, tag, Property::PT_TRANSFORM, 0),
	mIsRSMatrix(0),
	mPropertyTranslate(0),
	mPropertyRotation(0),
	mPropertyScale(0),
	mPropertyIsUniformScale(0),
	mTrans(trans),
	mObject(obj)
{
	APoint position;
	APoint rotation;
	APoint scale(1.0f, 1.0f, 1.0f);

	bool isRSMatrix = mTrans->IsRSMatrix();
	if (isRSMatrix)
	{
		position = mTrans->GetTranslate();
		Matrix3f mat = mTrans->GetRotate();
		mat.ExtractEulerXYZ(rotation.X(), rotation.Y(), rotation.Z());
		scale = mTrans->GetScale();
		bool isUniformScale = mTrans->IsUniformScale();

		mProperty = parent->mPage->Append(new wxStringProperty(
			name, tag, wxT("<composed>")) );

		mPropertyTranslate = parent->mPage->AppendIn(mProperty, 
			new wxAPoint3Property("Translate", tag+"Translate",
			position));
		mPropertyRotation = parent->mPage->AppendIn(mProperty, 
			new wxAPoint3Property("Rotate", tag+"Rotate", rotation));
		mPropertyScale = parent->mPage->AppendIn(mProperty, 
			new wxAPoint3Property("Scale", tag+"Scale", scale));

		mPropertyIsUniformScale = parent->mPage->AppendIn(mProperty, 
			new wxBoolProperty("IsUniformScale", tag+"IsUniformScale", isUniformScale));
		mPropertyIsUniformScale->Enable(false);
	}
	else
	{
		mProperty = parent->mPage->Append(new wxStringProperty(
			name, tag, wxT("<composed>")) );

		mIsRSMatrix = parent->mPage->AppendIn(mProperty, 
			new wxBoolProperty("IsRSMatrix", tag+"IsRSMatrix", false));
		mIsRSMatrix->Enable(false);
	}
}
Example #16
static inline Matrix3f createRotation(const Vector3f & _axis, float angle)
{
	Vector3f axis = normalize(_axis);
	float si = sinf(angle);
	float co = cosf(angle);

	Matrix3f ret;
	ret.setIdentity();

	ret *= co;
	for (int r = 0; r < 3; ++r) for (int c = 0; c < 3; ++c) ret.at(c,r) += (1.0f - co) * axis[c] * axis[r];

	Matrix3f skewmat;
	skewmat.setZeros();
	skewmat.at(1,0) = -axis.z;
	skewmat.at(0,1) =  axis.z;
	skewmat.at(2,0) =  axis.y;
	skewmat.at(0,2) = -axis.y;
	skewmat.at(2,1) =  axis.x;
	skewmat.at(1,2) = -axis.x;
	skewmat *= si;
	ret += skewmat;

	return ret;
}
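The scaled identity, the outer-product loop, and the skew-symmetric matrix above spell out Rodrigues' rotation formula for a unit axis a and angle \theta:

R = \cos\theta \, I + (1 - \cos\theta) \, a a^{\top} + \sin\theta \, [a]_{\times}

where [a]_{\times} is the cross-product (skew-symmetric) matrix of a.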
Example #17
void Camera::lookAt(const Vector3f &center, const Vector3f &eye, Vector3f &up) {
    Vector3f z = eye - center;
    z.normalize();
    up.normalize();
    Vector3f x = up.cross(z);
    x.normalize();
    Vector3f y = z.cross(x);
    Matrix3f M;
    M.col(0) = x;
    M.col(1) = y;
    M.col(2) = z;
    std::cout << M << std::endl;
    rotation = Quaternionf(M);
    position = eye;
}
Example #18
void ModelRender::render(WorldGraphics *world, const Environment *envi, const Matrix4f &mvp, unsigned int)
{
    if(!world->model.isVisible()) return;

    program.bind();
    Matrix4f mat=mvp;
    Matrix4f nmat;
    Matrix3f mv;

    Model * tmp;

    int count;
    Model ** list=world->model.renderList(count);
    for(int i=0;i<count;i++)
    {
        tmp=list[i];
        mat=tmp->getMatrix()*mvp;

        nmat=tmp->getMatrix();
        nmat.inverse();

        mv[0]=nmat[0];
        mv[1]=nmat[1];
        mv[2]=nmat[2];

        mv[3]=nmat[4];
        mv[4]=nmat[5];
        mv[5]=nmat[6];

        mv[6]=nmat[8];
        mv[7]=nmat[9];
        mv[8]=nmat[10];
        mv.transpose();

        if(tmp==world->model.hightlighted())
            program.uniform(uniform_selected,true);
        else
            program.uniform(uniform_selected,false);
        program.uniformMatrix(uniform_mv,mv);
        program.uniformMatrix(uniform_mvp,mat);
        program.uniformMatrix(uniform_model,tmp->getMatrix());

        ModelMesh * mesh=tmp->getMesh();
        render(mesh);
    }

    program.release();
}
void CCubicGrids::integrateFeatures( const pcl::device::Intr& cCam_, const Matrix3f& Rw_, const Vector3f& Tw_, int nFeatureScale_,
												  GpuMat& pts_curr_, GpuMat& nls_curr_,
												  GpuMat& gpu_key_points_curr_, const GpuMat& gpu_descriptor_curr_, const GpuMat& gpu_distance_curr_, const int nEffectiveKeypoints_,
												  GpuMat& gpu_inliers_, const int nTotalInliers_){

	using namespace btl::device;

	//Note: the point cloud in cFrame_ must be transformed into world coordinates before calling this, i.e. it integrates a VMap/NMap in world coordinates into the volume in world coordinates
	//get VMap and NMap in world
	Matrix3f tmp = Rw_;
	pcl::device::Mat33& devRwCurTrans = pcl::device::device_cast<pcl::device::Mat33> ( tmp );	//device cast does the transpose implicitly because eimcmRwCur is column major by default.
	//Cw = -Rw'*Tw
	Eigen::Vector3f eivCwCur = - Rw_.transpose() * Tw_ ;
	float3& devCwCur = pcl::device::device_cast<float3> (eivCwCur);
	
	//locate the volume coordinate for each feature, if the feature falls outside the volume, just remove it.
	cuda_nonmax_suppress_n_integrate ( _intrinsics, devRwCurTrans, devCwCur, _VolumeResolution,
								_gpu_YXxZ_tsdf, _fVolumeSizeM, _fVoxelSizeM,
								_fFeatureVoxelSizeM, _nFeatureScale, _vFeatureResolution, _gpu_YXxZ_vg_idx,
								pts_curr_, nls_curr_, gpu_key_points_curr_, gpu_descriptor_curr_, gpu_distance_curr_, nEffectiveKeypoints_,
								gpu_inliers_, nTotalInliers_, &_gpu_feature_volume_coordinate, &_gpu_counter,
								&_vTotalGlobal, &_gpu_global_3d_key_points, &_gpu_global_descriptors);

	PRINT(_vTotalGlobal[0]);
	return;
}
Example #20
void Shard::lightWithBiasSources(Vector3f const *posCoords, Vector4f *colorCoords,
    Matrix3f const &tangentMatrix, duint biasTime)
{
    DENG2_ASSERT(posCoords && colorCoords);
    Vector3f const sufNormal = tangentMatrix.column(2);

    if(d->biasIllums.isEmpty()) return;

    // Is it time to update bias contributors?
    bool biasUpdated = false;
    if(d->needBiasContributorUpdate())
    {
        // Perhaps our owner has updated lighting contributions for us?
        biasUpdated = d->owner->as<world::ClientSubsector>().updateBiasContributors(this);
    }

    // Light the given geometry.
    Vector3f const *posIt    = posCoords;
    Vector4f *colorIt        = colorCoords;
    for(dint i = 0; i < d->biasIllums.count(); ++i, posIt++, colorIt++)
    {
        *colorIt += d->biasIllums[i]->evaluate(*posIt, sufNormal, biasTime);
    }

    if(biasUpdated)
    {
        // Any changes from contributors will have now been applied.
        d->biasTracker.markIllumUpdateCompleted();
    }
}
Example #21
float mmf::OptSO3vMFCF::conjugateGradientCUDA_impl(Matrix3f& R, float res0,
    uint32_t n, uint32_t maxIter) {
  Eigen::Matrix3f N = Eigen::Matrix3f::Zero();
  // tauR_*R^T is the contribution of the motion prior between two
  // frames to regularize solution in case data exists only on certain
  // axes
  if (this->t_ >= 1) N += tauR_*R.transpose();
  for (uint32_t j=0; j<6; ++j) { 
    Eigen::Vector3f m = Eigen::Vector3f::Zero();
    m(j/2) = j%2==0?1.:-1.;
    N += m*this->cld_.xSums().col(j).transpose();
  }
  Eigen::JacobiSVD<Eigen::Matrix3f> svd(N,Eigen::ComputeThinU|Eigen::ComputeThinV);
  if (svd.matrixV().determinant()*svd.matrixU().determinant() > 0)
    R = svd.matrixV()*svd.matrixU().transpose();
  else
    R = svd.matrixV()*Eigen::Vector3f(1.,1.,-1.).asDiagonal()*svd.matrixU().transpose();

//  if (R.determinant() < 0.) {
//    //R *= -1.;
////    R = svd.matrixV()*Eigen::Vector3f(1.,1.,-1.).asDiagonal()*svd.matrixU().transpose();
//    std::cout << "determinant of R < 0" << std::endl;
//  }
//  std::cout << R.determinant() << std::endl;
//  std::cout << "N" << std::endl << N << std::endl;
//  std::cout << "R" << std::endl << R << std::endl;
//  std::cout << this->cld_.xSums() << std::endl;
  return (N*R).trace();
}
Example #22
bool VRPNTrackerAxis::parseTransform(const std::string& axis,
                           Matrix3f& transformOut )
{
    static const boost::regex k_axis_re("^([xyzXYZ])([xyzXYZ])([xyzXYZ])$");
    static std::map<char,Vector3f> k_axisMap =
            boost::assign::map_list_of ('X', Vector3f::UNIT_X)
                                       ('Y', Vector3f::UNIT_Y)
                                       ('Z', Vector3f::UNIT_Z);

    boost::smatch optResult;
    if (boost::regex_search(axis, optResult, k_axis_re))
    {
        for (size_t r = 0; r < transformOut.ROWS; ++r)
        {
            std::string optionValue(optResult.str(r+1));
            LBASSERT(optionValue.size() == 1);
            const char ax = optionValue[0];
            const float scale = std::isupper(ax) ? 1.f : -1.f;
            const Vector3f v = scale * k_axisMap[std::toupper(ax)];

            transformOut.set_row(r, v);
        }
    }
    else
        return false;

    return true;
}
Example #23
void
Compass::null_offsets(const Matrix3f &dcm_matrix)
{
    // Update our estimate of the offsets in the magnetometer
    Vector3f    calc(0.0, 0.0, 0.0);        // XXX should be safe to remove explicit init here as the default ctor should do the right thing
    Matrix3f    dcm_new_from_last;
    float       weight;

    Vector3f mag_body_new = Vector3f(mag_x,mag_y,mag_z);

    if(_null_init_done) {
        dcm_new_from_last = dcm_matrix.transposed() * _last_dcm_matrix;      // Note 11/20/2010: transpose() is not working, transposed() is.

        weight = 3.0 - fabs(dcm_new_from_last.a.x) - fabs(dcm_new_from_last.b.y) - fabs(dcm_new_from_last.c.z);
        if (weight > .001) {
            calc = mag_body_new + _mag_body_last;                // Eq 11 from Bill P's paper
            calc -= dcm_new_from_last * _mag_body_last;
            calc -= dcm_new_from_last.transposed() * mag_body_new;
            if(weight > 0.5) weight = 0.5;
            calc = calc * (weight);
            _offset.set(_offset.get() - calc);
        }
    } else {
        _null_init_done = true;
    }
    _mag_body_last = mag_body_new - calc;
    _last_dcm_matrix = dcm_matrix;

}
Example #24
void ModelRender::render(const Matrix4f &mvp, unsigned int)
{
    program.bind();
    Matrix4f mat=mvp;
    Matrix4f nmat;
    Matrix3f mv;

    glEnableVertexAttribArray(attribute_v_coord);
    glEnableVertexAttribArray(attribute_normal);
    glEnableVertexAttribArray(attribute_texcoord);

    Model * tmp;
    while(base->renderCount()>0)
    {
        tmp=base->popRender();
        mat=tmp->getMatrix()*mvp;

        nmat=tmp->getMatrix();
        nmat.inverse();

        mv[0]=nmat[0];
        mv[1]=nmat[1];
        mv[2]=nmat[2];

        mv[3]=nmat[4];
        mv[4]=nmat[5];
        mv[5]=nmat[6];

        mv[6]=nmat[8];
        mv[7]=nmat[9];
        mv[8]=nmat[10];
        mv.transpose();

        program.uniform(uniform_selected,tmp->isSelected());
        program.uniformMatrix(uniform_mvp,mat);
        program.uniformMatrix(uniform_mv,mv);

        renderModel(tmp);
        tmp=tmp->next;
    }

    glDisableVertexAttribArray(attribute_v_coord);
    glDisableVertexAttribArray(attribute_normal);
    glDisableVertexAttribArray(attribute_texcoord);

    program.unbind();
}
Example #25
/// Stabilizes mount relative to the Earth's frame
/// Inputs:
///    _roll_control_angle   desired roll       angle in radians,
///    _tilt_control_angle   desired tilt/pitch angle in radians,
///    _pan_control_angle    desired pan/yaw    angle in radians
/// Outputs:
///    _roll_angle           stabilized roll       angle in degrees,
///    _tilt_angle           stabilized tilt/pitch angle in degrees,
///    _pan_angle            stabilized pan/yaw    angle in degrees
void
AP_Mount::stabilize()
{
#if MNT_STABILIZE_OPTION == ENABLED
    // only do the full 3D frame transform if we are doing pan control
    if (_stab_pan) {
        Matrix3f m;                         ///< holds 3 x 3 matrix, var is used as temp in calcs
        Matrix3f cam;                       ///< Rotation matrix earth to camera. Desired camera from input.
        Matrix3f gimbal_target;             ///< Rotation matrix from plane to camera. Then Euler angles to the servos.
        m = _ahrs.get_dcm_matrix();
        m.transpose();
        cam.from_euler(_roll_control_angle, _tilt_control_angle, _pan_control_angle);
        gimbal_target = m * cam;
        gimbal_target.to_euler(&_roll_angle, &_tilt_angle, &_pan_angle);
        _roll_angle  = _stab_roll ? degrees(_roll_angle) : degrees(_roll_control_angle);
        _tilt_angle  = _stab_tilt ? degrees(_tilt_angle) : degrees(_tilt_control_angle);
        _pan_angle   = degrees(_pan_angle);
    } else {
        // otherwise base mount roll and tilt on the ahrs
        // roll/tilt attitude, plus any requested angle
        _roll_angle  = degrees(_roll_control_angle);
        _tilt_angle  = degrees(_tilt_control_angle);
        _pan_angle   = degrees(_pan_control_angle);
        if (_stab_roll) {
            _roll_angle -= degrees(_ahrs.roll);
        }
        if (_stab_tilt) {
            _tilt_angle -= degrees(_ahrs.pitch);
        }

        // Add lead filter.
        const Vector3f &gyro = _ahrs.get_gyro();

        if (_stab_roll && _roll_stb_lead != 0.0f && fabsf(_ahrs.pitch) < M_PI/3.0f) {
            // Compute rate of change of euler roll angle
            float roll_rate = gyro.x + (_ahrs.sin_pitch() / _ahrs.cos_pitch()) * (gyro.y * _ahrs.sin_roll() + gyro.z * _ahrs.cos_roll());
            _roll_angle -= degrees(roll_rate) * _roll_stb_lead;
        }

        if (_stab_tilt && _pitch_stb_lead != 0.0f) {
            // Compute rate of change of euler pitch angle
            float pitch_rate = _ahrs.cos_pitch() * gyro.y - _ahrs.sin_roll() * gyro.z;
            _tilt_angle -= degrees(pitch_rate) * _pitch_stb_lead;
        }
    }
#endif
}
Example #26
void UKFPose2D::poseSensorUpdate(const Vector3f& reading, const Matrix3f& readingCov)
{
  generateSigmaPoints();

  // computePoseReadings
  Vector3f poseReadings[7];
  for(int i = 0; i < 7; ++i)
    poseReadings[i] = sigmaPoints[i];

  // computeMeanOfPoseReadings
  Vector3f poseReadingMean = poseReadings[0];
  for(int i = 1; i < 7; ++i)
    poseReadingMean += poseReadings[i];
  poseReadingMean *= 1.f / 7.f;

  // computeCovOfPoseReadingsAndSigmaPoints
  Matrix3f poseReadingAndMeanCov = Matrix3f::Zero();
  for(int i = 0; i < 3; ++i)
  {
    const Vector3f d1 = poseReadings[i * 2 + 1] - poseReadingMean;
    poseReadingAndMeanCov += (Matrix3f() << d1 * l(0, i), d1 * l(1, i), d1 * l(2, i)).finished();
    const Vector3f d2 = poseReadings[i * 2 + 2] - poseReadingMean;
    poseReadingAndMeanCov += (Matrix3f() << d2 * -l(0, i), d2 * -l(1, i), d2 * -l(2, i)).finished();
  }
  poseReadingAndMeanCov *= 0.5f;

  // computeCovOfPoseReadingsReadings
  Matrix3f poseReadingCov;
  const Vector3f d = poseReadings[0] - poseReadingMean;
  poseReadingCov << d * d.x(), d * d.y(), d * d.z();
  for(int i = 1; i < 7; ++i)
  {
    const Vector3f d = poseReadings[i] - poseReadingMean;
    poseReadingCov += (Matrix3f() << d * d.x(), d * d.y(), d * d.z()).finished();
  }
  poseReadingCov *= 0.5f;

  poseReadingMean.z() = Angle::normalize(poseReadingMean.z());
  const Matrix3f kalmanGain = poseReadingAndMeanCov.transpose() * (poseReadingCov + readingCov).inverse();
  Vector3f innovation = reading - poseReadingMean;
  innovation.z() = Angle::normalize(innovation.z());
  const Vector3f correction = kalmanGain * innovation;
  mean += correction;
  mean.z() = Angle::normalize(mean.z());
  cov -= kalmanGain * poseReadingAndMeanCov;
  Covariance::fixCovariance(cov);
}
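The final lines are the standard UKF measurement update. Writing C for poseReadingAndMeanCov (the sigma-point/measurement cross term) and S = poseReadingCov + readingCov for the innovation covariance, the code computes

K = C^{\top} S^{-1}, \qquad \bar{x} \leftarrow \bar{x} + K\,(z - \hat{z}), \qquad P \leftarrow P - K\,C,

with the angular component renormalized before and after the correction.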
int main(int argc, char** argv) {
    ros::init (argc, argv, "pcl_homework");
    ros::NodeHandle n;

    char dir [] = "../dataset-kinect/";
    char rgbPrefix []= "rgb_";
    char depthPrefix []= "depth_";
    char jpgSuffix []= ".jpg";
    char pgmSuffix []= ".pgm";

    rotated << -1, 0, 0, 0,
                0, -1, 0, 0,
                0, 0, -1, 0,
                0, 0, 0, 1;

    camera_matrix<< fx, 0.0f, cx, 0.0f, fy, cy, 0.0f, 0.0f, 1.0f;
    t_mat.setIdentity();
    t_mat.block<3, 3>(0, 0) = camera_matrix.inverse();


    //Uncomment to show Mask Window
    namedWindow("Foreground", CV_WINDOW_AUTOSIZE);

    cvStartWindowThread();

    pcl::visualization::CloudViewer viewer ("Simple Cloud Viewer");

    //Uncomment to create Mask
    BackgroundSubtractorMOG PMOG(20, 4, 0.9);

    // Fill in the cloud data
    cloud->width    = 640;
    cloud->height   = 480;
    cloud->is_dense = false;
    cloud->points.resize (cloud->width * cloud->height);

    for( int i = 400; i < 550; i++ ) {
        char buffer1[100];
        char buffer2[100];
        sprintf(buffer1,"%s%s%d%s", dir, rgbPrefix, i, jpgSuffix);
        sprintf(buffer2,"%s%s%d%s", dir, depthPrefix, i, pgmSuffix);
        rgb_image = readJPG(buffer1);
        depth_image = readPGM(buffer2);

        //Calculate and show Mask
        PMOG(rgb_image,fgMaskMOG);
        imshow( "Foreground", fgMaskMOG );

        createPC();
        if(!viewer.wasStopped()){
            pcl::transformPointCloud( *cloud, *cloud, rotated);
            viewer.showCloud(cloud);
            ros::Duration(0.25, 0).sleep();
        }
    }

    waitKey(0);
    return 0;
}
Example #28
Surface makeSurfRev(const Curve &profile, unsigned steps)
{
    Surface surface;
    
    if (!checkFlat(profile))
    {
        cerr << "surfRev profile curve must be flat on xy plane." << endl;
        exit(0);
    }
    double angle = 2*PI/steps;
    Matrix3f M =  Matrix3f((float)cos(angle),0,(float)sin(angle),
			   0,1,0,
			   (float)(-sin(angle)),0,(float)cos(angle));
    M.transpose();
    Curve c = profile;
    unsigned numPoints = c.size();
    for(unsigned s=0;s<steps;s++){
      Curve newc;
      for (unsigned i=0;i<numPoints;i++){
	CurvePoint cp  = c[i];
	surface.VV.push_back(cp.V);
	surface.VN.push_back(-cp.N);
	Vector3f newV = M*cp.V;
	Vector3f newN = M*cp.N;
	newN.normalize();
	struct CurvePoint newP = {newV, cp.T,newN,cp.B};
	newc.push_back(newP);
      }
      c = newc;
      newc.clear();
    }

    int another;
    for(unsigned s=0;s<steps;s++){
      if(s==steps-1){
        another=0;
      }else{
	another = s+1;
      }
      for(unsigned i=0;i<numPoints-1;i++){
	surface.VF.push_back(Tup3u(s*numPoints+i,another*numPoints+i,another*numPoints+i+1));
	surface.VF.push_back(Tup3u(s*numPoints+i,another*numPoints+i+1,s*numPoints+i+1));
      }
    }
    return surface;
}
int main() { 
    Matrix3f m = Matrix3f::Random();
    //std::ptrdiff_t i, j;
    int i, j;
    float minOfM = m.minCoeff(&i,&j);
    cout << "Here is the matrix m:\n" << m << endl;
    cout << "Its minimum coefficient (" << minOfM 
         << ") is at position (" << i << "," << j << ")\n\n";

    RowVector4i v = RowVector4i::Random();
    int maxOfV = v.maxCoeff(&i);
    cout << "Here is the vector v: " << v << endl;
    cout << "Its maximum coefficient (" << maxOfV 
         << ") is at position " << i << endl;

    return 0;
}
void PointWithNormalMerger::computeAccumulator() {
  // Compute skin.
  _indexImage.resize(480*_scale, 640*_scale);
  Matrix3f _scaledCameraMatrix = _cameraMatrix;
  _scaledCameraMatrix.block<2, 3>(0, 0) *= _scale;
  _points.toIndexImage(_indexImage, _depthImage, _scaledCameraMatrix, Isometry3f::Identity());
  
  // Compute sigma for each point and create image accumulator.
  PixelMapper pm;
  pm.setCameraMatrix(_scaledCameraMatrix);
  pm.setTransform(Isometry3f::Identity());
  Matrix3f inverseCameraMatrix = _scaledCameraMatrix.inverse();
  float fB = (_baseLine * _scaledCameraMatrix(0, 0)); // Kinect baseline * focal length
  CovarianceAccumulator covAcc;
  _covariances.resize(480*_scale, 640*_scale);
  _covariances.fill(covAcc);
  for(size_t i = 0; i < _points.size(); i++ ) {
    PointWithNormal &p = _points[i];
    Vector2i coord = pm.imageCoords(pm.projectPoint(p.point()));
    if(coord[0] < 0 || coord[0] >= _depthImage.cols() ||
       coord[1] < 0 || coord[1] >= _depthImage.rows())
      continue;
    int index = _indexImage(coord[1], coord[0]);
    float skinZ = _depthImage(coord[1], coord[0]);
    Vector3f normal = p.normal();
    Vector3f skinNormal = _points[index].normal();
    float z = p[2];
    if(fabs(z - skinZ) > 0.05f)
      continue;
    if(acosf(skinNormal.dot(normal)) > M_PI/4.0f)
      continue;
    float zVariation = (_alpha*z*z)/(fB+z*_alpha);
    zVariation *= zVariation;
    Diagonal3f imageCovariance(1.0f, 1.0f, zVariation);
    Matrix3f covarianceJacobian;
    covarianceJacobian <<
      z, 0, (float)coord[0],
      0, z, (float)coord[1],
      0, 0, 1;
    covarianceJacobian = inverseCameraMatrix*covarianceJacobian;
    Matrix3f worldCovariance = covarianceJacobian * imageCovariance * covarianceJacobian.transpose();
    _covariances(coord[1], coord[0])._omegaAcc += worldCovariance.inverse();
    _covariances(coord[1], coord[0])._pointsAcc += worldCovariance.inverse()*p.point();
    _covariances(coord[1], coord[0])._used = true;
  }
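The 3x3 Jacobian built in the loop propagates the per-pixel measurement noise (unit variance in u and v, a depth-dependent variance in z) into world coordinates with the usual first-order rule \Sigma_{world} = J \, \Sigma_{image} \, J^{\top}. Each accumulator cell then stores the result in information form (summing \Sigma^{-1} and \Sigma^{-1} p), presumably so the merged point can later be recovered as the information-weighted mean.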
}