Example #1
/* Benchmark: chain two transforms stored as 12-double arrays
 * (9-element rotation matrix followed by a 3-element translation). */
void tfmul() {

    double q0[4], q1[4];
    double T0[12], T1[12];
    double Ua[12], Ub[12];
    aa_vrand( 4, q0 );
    aa_vrand( 4, q1 );
    aa_tf_qnormalize(q0);
    aa_tf_qnormalize(q1);
    aa_tf_quat2rotmat( q0, T0 );  /* rotation part: elements 0..8 */
    aa_tf_quat2rotmat( q1, T1 );
    aa_vrand( 3, T0+9 );          /* translation part: elements 9..11 */
    aa_vrand( 3, T1+9 );

    // cross-check: the plain and vectorized implementations must agree
    aa_tf_12chain(T0,T1,Ua);
    aa_vecm_tfmul(T0,T1,Ub);
    aveq( "tfmul-equal", 12, Ua, Ub, .0001 );

    aa_tick("tfmul non-vec: ");
    for( size_t i = 0; i < N; i ++ ) {  /* N: iteration count defined elsewhere in the test */
        aa_tf_12chain(T0,T1,Ua);
    }
    aa_tock();

    aa_tick("tfmul vec: ");
    for( size_t i = 0; i < N; i ++ ) {
        aa_vecm_tfmul(T0,T1,Ub);
    }
    aa_tock();
}
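
For reference, here is a minimal sketch (not part of the library) of the chaining operation benchmarked above, assuming the 12-double layout the test implies: a column-major 3x3 rotation in elements 0..8 followed by a translation in elements 9..11. The name tf12_chain_ref is made up for illustration.

#include <stddef.h>

/* Reference chain of two [R|v] transforms: R = Ra*Rb, v = Ra*vb + va,
 * assuming column-major rotations (element (i,j) at index i + 3*j). */
static void tf12_chain_ref( const double *Ta, const double *Tb, double *Tc )
{
    for( size_t j = 0; j < 3; j ++ ) {      /* columns of the result rotation */
        for( size_t i = 0; i < 3; i ++ ) {
            Tc[i + 3*j] = Ta[i + 0]*Tb[0 + 3*j]
                        + Ta[i + 3]*Tb[1 + 3*j]
                        + Ta[i + 6]*Tb[2 + 3*j];
        }
    }
    for( size_t i = 0; i < 3; i ++ ) {      /* translation: Ra*vb + va */
        Tc[9 + i] = Ta[i + 0]*Tb[9 + 0]
                  + Ta[i + 3]*Tb[9 + 1]
                  + Ta[i + 6]*Tb[9 + 2]
                  + Ta[9 + i];
    }
}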
Example #2
/* Round-trip ZYX Euler angles through quaternion and rotation-matrix paths. */
static void eulerzyx(const double *q)
{
    double qm[4], e_q[3], e_R[3];

    aa_tf_quat2eulerzyx(q,e_q);
    aa_tf_qminimize2( q, qm );

    {   // quaternion -> euler -> quaternion
        double q_e[4];
        aa_tf_eulerzyx2quat( e_q[0], e_q[1], e_q[2], q_e );
        aa_tf_qminimize( q_e );
        aveq("quat->euler->quat", 4, qm, q_e, .001 );
    }

    {   // quaternion -> rotation matrix -> euler -> quaternion
        double R[9], q_e[4];
        aa_tf_quat2rotmat(q, R);
        aa_tf_rotmat2eulerzyx(R,e_R);
        aa_tf_eulerzyx2quat( e_R[0], e_R[1], e_R[2], q_e );
        aa_tf_qminimize( q_e );
        aveq("quat->euler->quat/rotmat->euler->quat", 4, qm, q_e, .001 );

        aveq("quat->euler/rotmat->quat", 3, e_q, e_R, .001 );
    }

    aveq("quat->euler/rotmat->quat", 3, e_q, e_R, .001 );

}
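
For context on the round trips above, a minimal sketch of the usual ZYX (yaw-pitch-roll) extraction, assuming a column-major R (element (i,j) at R[i + 3*j]) and angles ordered z, y, x as the aa_tf_eulerzyx2quat calls suggest; the library's actual conventions should be checked against its headers.

#include <math.h>

/* Hypothetical reference extraction for R = Rz(e[0]) * Ry(e[1]) * Rx(e[2]). */
static void rotmat2eulerzyx_ref( const double R[9], double e[3] )
{
    e[0] = atan2( R[1], R[0] );                   /* z: yaw   */
    e[1] = atan2( -R[2], hypot( R[5], R[8] ) );   /* y: pitch */
    e[2] = atan2( R[5], R[8] );                   /* x: roll  */
}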
Example #3
/* Check that rotmat_vel2diff and rotmat_diff2vel are inverses. */
static void rotmat(double *q) {
    double R[9], w[3], dR[9], dRw[3];
    aa_vrand( 3, w );
    aa_tf_quat2rotmat(q, R);
    aa_tf_rotmat_vel2diff( R, w, dR );
    aa_tf_rotmat_diff2vel( R, dR, dRw );
    aveq("rotmat-vel", 3, w, dRw, 1e-6 );


}
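
A sketch of the relation this test checks, written for the world-frame convention dR = hat(w) * R with column-major storage; if the library instead uses the body-frame form dR = R * hat(w), the skew matrix multiplies on the right. The name rotmat_vel2diff_ref is illustrative only.

/* hat(w) is the skew-symmetric cross-product matrix of w. */
static void rotmat_vel2diff_ref( const double R[9], const double w[3], double dR[9] )
{
    /* hat(w), stored column-major */
    const double W[9] = {     0,  w[2], -w[1],
                          -w[2],     0,  w[0],
                           w[1], -w[0],     0 };
    for( int j = 0; j < 3; j ++ )
        for( int i = 0; i < 3; i ++ )
            dR[i + 3*j] = W[i + 0]*R[0 + 3*j]
                        + W[i + 3]*R[1 + 3*j]
                        + W[i + 6]*R[2 + 3*j];
}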
Example #4
/* Convert between rotation-vector, rotation-matrix, and quaternion forms. */
static void rotvec(double *q) {
    double e[3], R[9], vr[3];

    aa_tf_quat2rotmat(q, R);
    assert( aa_tf_isrotmat(R) );
    aa_tf_quat2rotvec(q, e);

    aa_tf_rotmat2rotvec(R, vr);

    aveq("rotvec", 3, e, vr, .001 );

    {   // exponential and logarithm of the rotation vector
        double ee[9], eln[3], qe[4], rln[3];
        aa_tf_rotmat_expv( e, ee );
        aa_tf_rotmat_lnv( ee, rln );
        aveq("rotmat_lnv", 3, e, rln, 1e-6 );
        aa_tf_rotmat2quat( ee, qe );
        aa_tf_quat2rotvec( qe, eln );
        aveq("rotmat_expv", 3, e, eln, 1e-6 );
    }
    {   // same round trip through the axis-angle form
        double aa[4], ee[9], eln[3], qe[4];
        aa_tf_rotvec2axang(e, aa);
        aa_tf_rotmat_exp_aa( aa, ee );
        aa_tf_rotmat2quat( ee, qe );
        aa_tf_quat2rotvec( qe, eln );
        aveq("rotmat_exp_aa", 3, e, eln, 1e-6 );
    }

    {   // rebuild R from pairs of its axis columns
        double Rtmp[9];
        aa_tf_rotmat_xy( R+0, R+3, Rtmp );
        aveq( "rotmat_xy", 9, R, Rtmp, 1e-6 );

        aa_tf_rotmat_yz( R+3, R+6, Rtmp );
        aveq( "rotmat_yz", 9, R, Rtmp, 1e-6 );

        aa_tf_rotmat_zx( R+6, R+0, Rtmp );
        aveq( "rotmat_zx", 9, R, Rtmp, 1e-6 );
    }
}
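
The exponential-map round trips above all encode Rodrigues' rotation formula. With rotation vector e, angle \theta = \|e\|, cross-product matrix [e]_\times, and assuming the vector-first, scalar-last quaternion layout that the q[3] scalar accesses later in these examples suggest:

R = \exp([e]_\times) = I + \frac{\sin\theta}{\theta}\,[e]_\times
      + \frac{1-\cos\theta}{\theta^2}\,[e]_\times^2,
\qquad
q = \Bigl( \frac{e}{\theta}\sin\frac{\theta}{2},\ \cos\frac{\theta}{2} \Bigr).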
Example #5
/* Exercise quaternion operations on a pair of poses E[0], E[1]
 * (rotation quaternion in the first four elements of each). */
static void quat(double E[2][7]) {
    double u;
    double *q1 = E[0];
    double *q2 = E[1];
    u = aa_frand();

    {
        double qg[4], qa[4];
        aa_tf_qslerp( u, q1, q2, qg );
        aa_tf_qslerpalg( u, q1, q2, qa );
        aveq("slerp", 4, qg, qa, .001 );

        double dqg[4], dqa[4];
        aa_tf_qslerpdiff( u, q1, q2, dqg );
        aa_tf_qslerpdiffalg( u, q1, q2, dqa );
        aveq("slerpdiff", 4, dqg, dqa, .001 );
    }

    // mul: q1*q2 should equal L(q1)*q2 and R(q2)*q1 (left/right multiplication matrices)
    {
        double Ql[16], Qr[16];
        double y0[4], y1[4], y2[4];
        aa_tf_qmatrix_l(q1, Ql, 4);
        aa_tf_qmatrix_r(q2, Qr, 4);
        aa_tf_qmul(q1,q2, y0);
        cblas_dgemv( CblasColMajor, CblasNoTrans, 4, 4,
                     1.0, Ql, 4,
                     q2, 1,
                     0, y1, 1 );
        cblas_dgemv( CblasColMajor, CblasNoTrans, 4, 4,
                     1.0, Qr, 4,
                     q1, 1,
                     0, y2, 1 );
        aveq( "qmul-1", 4, y0, y1, 1e-6 );
        aveq( "qmul-2", 4, y0, y2, 1e-6 );
    }


    // average: a 50/50 Davenport (q-method) average should match the slerp midpoint
    {
        double qq[8], p[4], s[4];
        AA_MEM_CPY( qq, q1, 4 );
        AA_MEM_CPY( qq+4, q2, 4 );
        double w[2] = {.5,.5};
        aa_tf_quat_davenport( 2, w, qq, 4, p );
        aa_tf_qslerp( .5, q1, q2, s );
        aa_tf_qminimize( p );
        aa_tf_qminimize( s );
        aveq("davenport-2", 4, p, s, 1e-4 );
    }

    // relative rotation: quaternion and rotation-matrix forms should agree
    double R1[9], R2[9], Rr[9], qr[4], qrr[4];
    aa_tf_quat2rotmat(q1, R1);
    aa_tf_quat2rotmat(q2, R2);
    aa_tf_9rel( R1, R2, Rr );
    aa_tf_qrel( q1, q2, qr );
    aa_tf_rotmat2quat( Rr, qrr );
    aa_tf_qminimize( qr );
    aa_tf_qminimize( qrr );
    aveq("qrel", 4, qr, qrr, .001 );

    // minimize: flip sign so the scalar part is nonnegative and the angle lies in [-pi, pi]
    {
        double qmin[4], axang[4];
        aa_tf_qminimize2( q1, qmin );
        test( "quat-minimize",  aa_feq( fabs(q1[3]), qmin[3], 0) );
        aa_tf_quat2axang( qmin, axang );
        test( "quat-minimize-angle",  fabs(axang[3]) <= M_PI );
    }

    // mulc / cmul: multiplication with a conjugated argument
    {
        double q1c[4], q2c[4], t1[4], t2[4];
        aa_tf_qconj(q1, q1c);
        aa_tf_qconj(q2, q2c);

        aa_tf_qmul(q1,q2c,t1);
        aa_tf_qmulc(q1,q2,t2);
        aveq("qmulc", 4, t1, t2, .001 );

        aa_tf_qmul(q1c,q2,t1);
        aa_tf_qcmul(q1,q2,t2);
        aveq("qcmul", 4, t1, t2, .001 );
    }
    // conj. props
    {
        // p*q = conj(conj(q) * conj(p))
        double c1[4], c2[4], c2c1[4], cc2c1[4], q1q2[4];
        aa_tf_qconj(q1,c1);
        aa_tf_qconj(q2,c2);
        aa_tf_qmul(c2,c1,c2c1);
        aa_tf_qmul(q1,q2,q1q2);
        aa_tf_qconj(c2c1,cc2c1);
        aveq("conjprop", 4, q1q2, cc2c1, .0001);
    }
    // exp
    {
        double q1e[4], q1eln[4];
        aa_tf_qexp(q1, q1e);
        aa_tf_qln(q1e, q1eln);
        aveq("exp-log", 4, q1, q1eln, .00001 );
        aa_tf_qln(q1, q1eln);
        aa_tf_qexp(q1eln, q1e);
        aveq("log-exp", 4, q1, q1e, .00001 );
    }

    // diff: angular velocity <-> quaternion derivative should round-trip
    double w[3]={0}, dq[4], wdq[3];
    aa_vrand( 3, w );
    aa_tf_qvel2diff( q1, w, dq );
    aa_tf_qdiff2vel( q1, dq, wdq );
    aveq("qveldiff", 3, w, wdq, .000001);

    // integrate: advance q1 by velocity w (or derivative dq) over a small
    // timestep dt with several methods; the results should agree
    double qn_rk1[4], qn_vrk1[4], qn_vrk4[4], qn_vexp[4], qn_dq[4], w0[3] = {0};
    double dt = .02;

    aa_tf_qrk1( q1, dq, dt, qn_rk1 );
    aa_tf_qvelrk1( q1, w, dt, qn_vrk1 );
    aa_tf_qvelrk4( q1, w, dt, qn_vrk4 );
    aa_tf_qsvel( q1, w, dt, qn_vexp );
    aa_tf_qsdiff( q1, dq, dt, qn_dq );
    aveq("qvelrk1", 4, qn_rk1, qn_vrk1, .001 );
    aveq("qvelrk4", 4, qn_rk1, qn_vrk4, .001 );
    aveq("qvelexp", 4, qn_vrk4, qn_vexp, .0001);
    aveq("qvelsdiff", 4, qn_vexp, qn_dq, .001 );
    aa_tf_qsvel( q1, w0, dt, qn_vexp );
    aveq("qvelsvel0", 4, q1, qn_vexp, .000 );

    {   // integrating in rotation-matrix form should match the quaternion form
        double Rb[9], qR[4];
        aa_tf_qsvel( q1, w, dt, qn_vexp );
        aa_tf_rotmat_svel( R1, w, dt, Rb );
        aa_tf_rotmat2quat( Rb, qR );
        aa_tf_qminimize( qn_vexp);
        aa_tf_qminimize( qR );
        aveq("rotmat_svel", 4, qn_vexp, qR, 1e-4 );
    }

    // vectors
    {
        double *v0 = E[0] + AA_TF_QUTR_T;
        double *v1 = E[1] + AA_TF_QUTR_T;
        double q[4], vp[3];

        // identity case: identical vectors should give the identity quaternion
        aa_tf_vecs2quat( v0, v0, q);
        aveq( "vecs2quat-ident", 4, q, aa_tf_quat_ident, 1e-6 );

        // regular case
        aa_tf_vecs2quat( v0, v1, q);
        aa_tf_qrot(q,v0,vp);

        // rotating v0 by q should give v1 rescaled to the length of v0
        {
            double n0 = sqrt(v0[0]*v0[0] + v0[1]*v0[1] + v0[2]*v0[2] );
            double n1 = sqrt(v1[0]*v1[0] + v1[1]*v1[1] + v1[2]*v1[2] );
            double vp1[3];
            for( size_t i = 0; i < 3; i ++ ) {
                vp1[i] = n0*v1[i] / n1;
            }

            aveq("vecs2quat", 3, vp, vp1, 1e-6 );
        }
        // antiparallel case: v0 and -v0
        double v0n[3] = {-v0[0], -v0[1], -v0[2]};
        aa_tf_vecs2quat( v0, v0n, q);
        aa_tf_qrot(q,v0,vp);
        {
            double n0 = sqrt(v0[0]*v0[0] + v0[1]*v0[1] + v0[2]*v0[2] );
            double n1 = sqrt(v0n[0]*v0n[0] + v0n[1]*v0n[1] + v0n[2]*v0n[2] );
            double vp1[3];
            for( size_t i = 0; i < 3; i ++ ) {
                vp1[i] = n0*v0n[i] / n1;
            }

            aveq("vecs2quat-degenerate", 3, vp, vp1, 1e-6 );
        }
    }
}
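
For reference, the main identities exercised above, with \otimes denoting quaternion multiplication and an overbar the conjugate; the velocity relation is written for a world-frame angular velocity \omega, which is an assumption about aa_tf_qvel2diff's convention:

q_1 \otimes q_2 = L(q_1)\,q_2 = R(q_2)\,q_1,
\qquad
\overline{q_1 \otimes q_2} = \bar q_2 \otimes \bar q_1,
\qquad
\dot q = \tfrac{1}{2}\,\omega \otimes q .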