void qrot() {
    double a[4], b[4], c1[4], c2[4];
    aa_vrand( 4, a );
    aa_vrand( 4, b );
    aa_tf_qnormalize(a);
    aa_tf_qnormalize(b);

    aa_tf_qrot(a,b,c1);
    aa_vecm_qrot(a,b,c2);
    aveq( "qrot-equal", 3, c1, c2, .0001 );

    aa_tick("qrot non-vec: ");
    for( size_t i = 0; i < N; i ++ ) {
        aa_tf_qrot(a,b,c1);
    }
    aa_tock();

    aa_tick("qrot vec: ");
    for( size_t i = 0; i < N; i ++ ) {
        aa_vecm_qrot(a,b,c1);
    }
    aa_tock();
}
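/*
 * The aa_tick()/aa_tock() pairs in these benchmark loops are assumed to
 * start a wall-clock timer and print the elapsed time for the N
 * iterations.  A rough, hypothetical equivalent is sketched below;
 * bench_tick/bench_tock are illustrative names, not amino API, and the
 * helpers are not referenced by the tests.  The includes may be redundant
 * with the top of this file.
 */
#include <stdio.h>
#include <time.h>

static struct timespec bench_t0;

static void bench_tick( const char *label ) {
    /* print the label, then record the start time */
    fputs( label, stdout );
    clock_gettime( CLOCK_MONOTONIC, &bench_t0 );
}

static void bench_tock( void ) {
    /* print seconds elapsed since the matching bench_tick() */
    struct timespec t1;
    clock_gettime( CLOCK_MONOTONIC, &t1 );
    double s = (double)(t1.tv_sec - bench_t0.tv_sec)
        + 1e-9 * (double)(t1.tv_nsec - bench_t0.tv_nsec);
    printf( "%f s\n", s );
}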
void duqumul() {
    double q0[4], q1[4];
    double v0[3], v1[3];
    double d0[8], d1[8];
    double ra[8], rb[8];

    aa_vrand( 4, q0 );
    aa_vrand( 4, q1 );
    aa_tf_qnormalize(q0);
    aa_tf_qnormalize(q1);
    aa_vrand( 3, v0 );
    aa_vrand( 3, v1 );
    aa_tf_qv2duqu( q0, v0, d0 );
    aa_tf_qv2duqu( q1, v1, d1 );

    /* compare scalar and vectorized dual quaternion products */
    aa_tf_duqu_mul(d0,d1,ra);
    aa_vecm_duqu_mul(d0,d1,rb);
    aveq( "duqu_mul-equal", 8, ra, rb, .0001 );

    aa_tick("duqu-mul non-vec: ");
    for( size_t i = 0; i < N; i ++ ) {
        aa_tf_duqu_mul(d0,d1,ra);
    }
    aa_tock();

    aa_tick("duqu-mul vec: ");
    for( size_t i = 0; i < N; i ++ ) {
        aa_vecm_duqu_mul(d0,d1,rb);
    }
    aa_tock();
}
void qmul() {
    double a[4], b[4], c1[4], c2[4];
    double va[4], vb[4];
    aa_vrand( 4, a );
    aa_vrand( 4, b );
    aa_tf_qnormalize(a);
    aa_tf_qnormalize(b);

    /* check the SIMD load/store round trip exactly */
    aa_vec_d4_st( va, aa_vec_d4_ld(a) );
    aa_vec_d4_st( vb, aa_vec_d4_ld(b) );
    aveq( "lda", 4, a, va, .000 );
    aveq( "ldb", 4, b, vb, .000 );

    aa_tf_qmul(a,b,c1);
    aa_vecm_qmul(a,b,c2);
    aveq( "quat-equal", 4, c1, c2, .0001 );

    aa_tick("qmul non-vec: ");
    for( size_t i = 0; i < N; i ++ ) {
        aa_tf_qmul(a,b,c1);
    }
    aa_tock();

    aa_tick("qmul vec: ");
    for( size_t i = 0; i < N; i ++ ) {
        aa_vecm_qmul(a,b,c1);
    }
    aa_tock();
}
void rel_q() {
    // random transforms
    double q0[4], qrel[4], q1[4];
    aa_vrand(4,q0);
    aa_vrand(4,q1);
    aa_tf_qnormalize(q0);
    aa_tf_qnormalize(q1);
    aa_tf_qcmul(q0, q1, qrel );

    // random velocity, point 0
    double dx0[3]={0}, dq0[4];
    aa_vrand(3,dx0);
    aa_tf_qvel2diff( q0, dx0, dq0 );

    // computed velocity, point 1
    double dq1[4], dx1[3];
    aa_tf_qmul( dq0, qrel, dq1 );
    aa_tf_qdiff2vel( q1, dq1, dx1 );

    // integrate both velocities
    double q0_1[4], q1_1[4];
    aa_tf_qsvel( q0, dx0, .1, q0_1 );
    aa_tf_qsvel( q1, dx1, .1, q1_1 );

    // new relative orientation
    double qrel_1[4];
    aa_tf_qcmul( q0_1, q1_1, qrel_1 );

    // check
    aveq("relq", 4, qrel, qrel_1, .000001);
}
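/*
 * Note on rel_q() above: with q_rel = conj(q0) * q1 held fixed, q1 = q0 *
 * q_rel, so the product rule gives dq1/dt = (dq0/dt) * q_rel (the second
 * term drops out since d(q_rel)/dt = 0).  Integrating q0 and q1 with their
 * respective velocities over the same step therefore leaves the relative
 * rotation conj(q0') * q1' unchanged, which is what the "relq" check
 * asserts.
 */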
void tfmul() {
    double q0[4], q1[4];
    double T0[12], T1[12];
    double Ua[12], Ub[12];
    aa_vrand( 4, q0 );
    aa_vrand( 4, q1 );
    aa_tf_qnormalize(q0);
    aa_tf_qnormalize(q1);
    aa_tf_quat2rotmat( q0, T0 );
    aa_tf_quat2rotmat( q1, T1 );
    aa_vrand( 3, T0+9 );
    aa_vrand( 3, T1+9 );

    // compare scalar and vectorized transform chaining
    aa_tf_12chain(T0,T1,Ua);
    aa_vecm_tfmul(T0,T1,Ub);
    aveq( "tfmul-equal", 12, Ua, Ub, .0001 );

    aa_tick("tfmul non-vec: ");
    for( size_t i = 0; i < N; i ++ ) {
        aa_tf_12chain(T0,T1,Ua);
    }
    aa_tock();

    aa_tick("tfmul vec: ");
    for( size_t i = 0; i < N; i ++ ) {
        aa_vecm_tfmul(T0,T1,Ub);
    }
    aa_tock();
}
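/*
 * The slerp() test below feeds the same quaternion q as both endpoints of
 * aa_tf_qslerpchaindiff(); interpolating between a rotation and itself
 * should return that rotation for any parameter u, which is what the
 * "chaindiff equiv" check asserts (the derivative outputs are not checked
 * here).
 */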
static void slerp() {
    double q[4], qy[4], u, du;
    double dq1[4], dq2[4], dqy[4];
    aa_tf_qurand(q);
    u = aa_frand();
    du = aa_frand();
    aa_vrand(4,dq1);
    aa_vrand(4,dq2);

    aa_tf_qslerpchaindiff( u, du, q, dq1, q, dq2, qy, dqy );
    aveq("chaindiff equiv", 4, q, qy, 1e-6);
}
void rel_d() {
    // random transforms
    double q0[4], v0[3], q1[4], v1[3];
    aa_vrand(4,q0);
    aa_vrand(4,q1);
    aa_tf_qnormalize(q0);
    aa_tf_qnormalize(q1);
    aa_vrand(3,v0);
    aa_vrand(3,v1);

    // dual quat transforms
    double d0[8], drel[8], d1[8], d1p[8];
    aa_tf_qv2duqu(q0,v0, d0);
    aa_tf_qv2duqu(q1,v1, d1);
    // d0 * drel = d1
    // drel = conj(d0) * d1
    aa_tf_duqu_cmul( d0, d1, drel );
    aa_tf_duqu_mul( d0, drel, d1p );
    aveq("duqu-relmul", 8, d1, d1p, .001 );

    // random velocity
    double dx0[6], dd0[8];
    aa_vrand(6,dx0);
    aa_tf_duqu_vel2diff(d0, dx0, dd0);

    // second velocity
    // d1 = d0*drel
    // d1/dt = d0/dt * drel + d0 * drel/dt, and drel/dt = 0
    double dd1[8];
    aa_tf_duqu_mul( dd0, drel, dd1 );

    // integrate
    double d0_1[8], d1_1[8];
    double dt = .1;
    aa_tf_duqu_sdiff( d0, dd0, dt, d0_1 );
    aa_tf_duqu_sdiff( d1, dd1, dt, d1_1 );
    aa_tf_duqu_normalize( d0_1 );
    aa_tf_duqu_normalize( d1_1 );

    // new relative
    double drel_1[8];
    // drel = conj(d0) * d1
    aa_tf_duqu_cmul( d0_1, d1_1, drel_1 );

    // twist
    double d0_1t[8], d1_1t[8], drel_1t[8];
    aa_tf_duqu_svel( d0, dx0, dt, d0_1t );
    aa_tf_duqu_sdiff( d1, dd1, dt, d1_1t );
    aa_tf_duqu_cmul( d0_1t, d1_1t, drel_1t );

    // check
    aveq("rel_d", 8, drel, drel_1t, 1e-6);
}
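/*
 * Note on rel_d() above: this is the dual-quaternion analogue of rel_q().
 * The relative transform is d_rel = conj(d0) * d1, and with d_rel constant
 * the product rule gives d(d1)/dt = (d(d0)/dt) * d_rel.  Integrating d0 by
 * the exponential twist and d1 by its finite difference over the same step
 * must then preserve conj(d0') * d1', which the "rel_d" check asserts.
 */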
void qvmul(void) {
    double q[4], v[4], r1[4], r2[4];
    aa_vrand( 3, v );
    v[3] = 0;
    aa_vrand( 4, q );

    aa_tf_qmul_qv( q, v, r1);
    aa_tf_qmul( q, v, r2);
    aveq( "qmul_qv", 4, r1, r2, 1e-7 );

    aa_tf_qmul_vq( v, q, r1);
    aa_tf_qmul( v, q, r2);
    aveq( "qmul_v", 4, r1, r2, 1e-7 );
}
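/*
 * qvmul() embeds the 3-vector v as a pure quaternion (scalar part set to
 * zero), so the specialized products aa_tf_qmul_qv and aa_tf_qmul_vq must
 * agree with the general quaternion product applied to (v, 0).
 */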
static void rotmat(double *q) {
    //double q[4], R[9], w[3], dR[9], dRw[3];
    //aa_tf_qurand( q );
    double R[9], w[3], dR[9], dRw[3];
    aa_vrand( 3, w );
    aa_tf_quat2rotmat(q, R);
    aa_tf_rotmat_vel2diff( R, w, dR );
    aa_tf_rotmat_diff2vel( R, dR, dRw );
    aveq("rotmat-vel", 3, w, dRw, 1e-6 );
}
static void tfmat() {
    double v[6], evR[9], ev[12], lneR[3], lne[6];
    aa_vrand( 6, v );

    aa_tf_tfmat_expv( v, ev );
    aa_tf_rotmat_expv( v+3, evR );
    aveq( "rotmat-tfmat-exp", 9, evR, ev, 1e-6 );

    aa_tf_tfmat_lnv( ev, lne );
    aa_tf_rotmat_lnv( evR, lneR );
    aveq( "rotmat-exp-ln", 3, v+3, lneR, 1e-6 );
    aveq( "tfmat-exp-ln", 6, v, lne, 1e-6 );
}
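/*
 * The tfmat() comparisons above rely on the 12-element transform layout
 * used elsewhere in this file (column-major rotation block in elements
 * 0..8, translation in 9..11), so the leading 9 elements of the transform
 * exponential can be compared directly against the rotation-only
 * exponential, and likewise for the logarithms.
 */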
void cross() {
    double a[3], b[3], c1[3], c2[3];
    aa_vrand( 3, a );
    aa_vrand( 3, b );

    // cross
    aa_tf_cross(a,b,c1);
    aa_vecm_cross(a,b,c2);
    aveq( "cross-equal", 3, c1, c2, .0001 );

    aa_tick("cross non-vec: ");
    for( size_t i = 0; i < N; i ++ ) {
        aa_tf_cross(a,b,c1);
    }
    aa_tock();

    aa_tick("cross vec: ");
    for( size_t i = 0; i < N; i ++ ) {
        aa_vecm_cross(a,b,c1);
    }
    aa_tock();
}
int main( void ) {
    // init
    time_t seed = time(NULL);
    printf("seed: %ld\n", seed);
    srand((unsigned int)seed); // might break in 2038

    aa_test_ulimit();

    for( size_t i = 0; i < 1000; i++ ) {
        /* Random Data */
        static const size_t k=2;
        double E[2][7], S[2][8], T[2][12], dx[2][6];
        for( size_t j = 0; j < k; j ++ ) {
            rand_tf(E[j], S[j], T[j]);
            aa_vrand(6,dx[j]);
        }
        //printf("%d\n",i);

        /* Run Tests */
        rotvec(E[0]);
        euler(dx[0]);
        euler1(dx[0]);
        eulerzyx(E[0]);
        chain(E,S,T);
        quat(E);
        duqu();
        rel_q();
        rel_d();
        slerp();
        theta2quat();
        rotmat(E[0]);
        tfmat();
        tfmat_inv(T[0]);
        mzlook(dx[0]+0, dx[0]+3, dx[1]+0);
        integrate(E[0], S[0], T[0], dx[0]);
        tf_conj(E, S);
        qdiff(E,dx);
    }

    return 0;
}
static void duqu() {
    // random tf
    aa_tf_tfmat_t T;
    aa_tf_duqu_t H;
    double E[7];
    double S_ident[8] = AA_TF_DUQU_IDENT_INITIALIZER;
    double Q_ident[4] = AA_TF_QUAT_IDENT_INITIALIZER;
    double v_ident[3] = {0};
    double p0[3];
    rand_tf( E, H.data, T.data );
    aa_vrand( 3, p0 );

    {
        double A[8], B[8];
        aa_vrand(8,A);
        aa_vrand(8,B);

        // mul
        {
            double A_L[8*8], B_R[8*8];
            double C[8], Cl[8], Cr[8];
            aa_tf_duqu_mul(A,B,C);

            aa_tf_duqu_matrix_l(A, A_L, 8);
            cblas_dgemv( CblasColMajor, CblasNoTrans, 8, 8,
                         1.0, A_L, 8, B, 1, 0, Cl, 1 );
            aveq( "duqu-mul-L", 8, C, Cl, 1e-6 );

            aa_tf_duqu_matrix_r(B, B_R, 8);
            cblas_dgemv( CblasColMajor, CblasNoTrans, 8, 8,
                         1.0, B_R, 8, A, 1, 0, Cr, 1 );
            aveq( "duqu-mul-R", 8, C, Cr, 1e-6 );
        }

        // add / sub
        {
            double Ca[8], Cs[8], mB[8];
            for( size_t i = 0; i < 8; i ++ ) mB[i] = -B[i];
            aa_tf_duqu_add(A,B,Ca);
            aa_tf_duqu_sub(A,mB,Cs);
            aveq( "duqu-add-sub", 8, Ca, Cs, 1e-6 );

            double Cra[4], Crs[4];
            double Cda[4], Cds[4];
            aa_tf_duqu_sub(A,B,Cs);
            aa_tf_qadd(A+AA_TF_DUQU_REAL, B+AA_TF_DUQU_REAL,Cra);
            aa_tf_qadd(A+AA_TF_DUQU_DUAL, B+AA_TF_DUQU_DUAL,Cda);
            aa_tf_qsub(A+AA_TF_DUQU_REAL, B+AA_TF_DUQU_REAL,Crs);
            aa_tf_qsub(A+AA_TF_DUQU_DUAL, B+AA_TF_DUQU_DUAL,Cds);
            aveq( "duqu-qadd-real", 4, Cra, Ca+AA_TF_DUQU_REAL, 1e-6);
            aveq( "duqu-qadd-dual", 4, Cda, Ca+AA_TF_DUQU_DUAL, 1e-6);
            aveq( "duqu-qsub-real", 4, Crs, Cs+AA_TF_DUQU_REAL, 1e-6);
            aveq( "duqu-qsub-dual", 4, Cds, Cs+AA_TF_DUQU_DUAL, 1e-6);
        }
    }

    //double q[4], v[3], p0[3];
    //aa_vrand( 3, v );
    //aa_tf_qurand( q );
    //AA_MEM_SET( v, 0, 3 );

    // tfmat
    //aa_tf_quat2rotmat(q, T.R);
    //AA_MEM_CPY( &T.t.x, v, 3 );

    // dual quat
    //aa_tf_qv2duqu( q, v, H.data );
    //aa_tf_qv2duqu( aa_tf_quat_ident, v, H_tran.data );

    // check trans
    double hv[3];
    aa_tf_duqu_trans(H.data, hv);
    aveq("duqu-trans", 3, T.v.data, hv, .001 );

    //double nreal,ndual;
    //aa_tf_duqu_norm( H.data, &nreal, &ndual );
    //printf("norm: %f + %f \\epsilon \n", nreal, ndual );

    // transform points
    double p1H[3], p1qv[3], p1T[3];
    aa_tf_12( T.data, p0, p1T );
    aa_tf_tf_qv( H.real.data, T.v.data, p0, p1qv );
    aa_tf_tf_duqu( H.data, p0, p1H );
    aveq( "tf-qv", 3, p1T, p1qv, .001 );
    aveq( "tf-duqu", 3, p1T, p1H, .001 );

    // conjugate
    {
        double S_conj[8];
        double qv_conj[7], E_conj[7];
        double SSc[8], EEc[7];
        double Scv[3];
        aa_tf_duqu_conj(H.data, S_conj);
        aa_tf_qv_conj(H.real.data, T.v.data, qv_conj, qv_conj+4);
        aa_tf_qutr_conj(E, E_conj);
        aa_tf_duqu_trans(S_conj, Scv);

        aveq( "duqu/qutr conj q", 4, S_conj, E_conj, 1e-6 );
        aveq( "duqu/qv conj q", 4, S_conj, qv_conj, 1e-6 );
        aveq( "duqu/qutr conj v", 3, Scv, E_conj+4, 1e-6 );
        aveq( "duqu/qv conj v", 3, Scv, qv_conj+4, 1e-6 );

        aa_tf_duqu_mul( H.data, S_conj, SSc );
        aa_tf_qv_chain( H.real.data, T.v.data, qv_conj, qv_conj+4, EEc, EEc+4 );
        aveq( "duqu conj", 8, SSc, S_ident, 1e-6 );
        aveq( "qv conj q", 4, EEc, Q_ident, 1e-6 );
        aveq( "qv conj v", 3, EEc+4, v_ident, 1e-6 );
    }

    // derivative
    {
        double dx[6], dd[8], dq[4];
        aa_vrand(6, dx);
        double dt = aa_frand() / 100;
        aa_tf_duqu_vel2diff( H.data, dx, dd );
        aa_tf_qvel2diff( H.real.data, dx+3, dq );

        // back to velocity
        double dx1[6];
        aa_tf_duqu_diff2vel( H.data, dd, dx1 );
        aveq( "duqu-vel invert", 6, dx, dx1, .001 );

        // integrate
        double H1[8], q1[4], v1[3], H1qv[8];
        double H1_sdd[8], H1_sdx[8];
        for( size_t i = 0; i < 8; i ++ ) H1[i] = H.data[i] + dd[i]*dt; // some numerical error here...
        for( size_t i = 0; i < 3; i ++ ) v1[i] = T.v.data[i] + dx[i]*dt;
        aa_tf_duqu_normalize( H1 );
        aa_tf_qsvel( H.real.data, dx+3, dt, q1 );
        aa_tf_qv2duqu( q1, v1, H1qv );
        aveq( "duqu-vel_real", 4, dq, dd, .001 );
        aveq( "duqu-vel-int real", 4, H1, H1qv, .001 );
        aveq( "duqu-vel-int dual", 4, H1+4, H1qv+4, .001 );
        aa_tf_duqu_svel( H.data, dx, dt, H1_sdx );
        aa_tf_duqu_sdiff( H.data, dd, dt, H1_sdd );
        aveq( "duqu-int vel", 8, H1qv, H1_sdx, .001 );
        aveq( "duqu-int diff", 8, H1_sdx, H1_sdd, .0001 );

        // twist
        double tw[8], dxtw[6];
        aa_tf_duqu_vel2twist(H.data, dx, tw );
        aa_tf_duqu_twist2vel(H.data, tw, dxtw );
        aveq( "duqu twist<->vel", 6, dx, dxtw, 1e-6 );
    }

    // exponential
    {
        double expd[8], lnexpd[8];
        aa_tf_duqu_exp(H.data, expd );
        aa_tf_duqu_ln( expd, lnexpd );
        aveq( "duqu-exp-ln", 8, H.data, lnexpd, .001 );
        aa_tf_duqu_ln( H.data, lnexpd );
        aa_tf_duqu_exp(lnexpd, expd );
        aveq( "duqu-ln-exp", 8, H.data, expd, .001 );
    }

    // Logarithm
    {
        double HI[8], HIln[8], dxi[6], dx0[6] = {0};
        aa_tf_duqu_mulc( H.data, H.data, HI );
        aa_tf_duqu_ln(HI, HIln);
        aa_tf_duqu_twist2vel(HI, HIln, dxi );
        aveq( "duqu ln 0 near", 6, dx0, dxi, .0001 );

        aa_tf_duqu_ln(aa_tf_duqu_ident, HIln);
        aa_tf_duqu_twist2vel(HI, HIln, dxi );
        aveq( "duqu ln 0 exact", 6, dx0, dxi, 0.0 );
    }

    // Pure translation
    {
        double S[8], v[3], v1[3];
        aa_vrand(3,v);
        aa_tf_xyz2duqu( v[0], v[1], v[2], S );
        aa_tf_duqu_trans(S, v1);
        aveq( "duqu trans orientation", 4, S, aa_tf_quat_ident, 0.0 );
        aveq( "duqu trans translation", 3, v, v1, 1e-6 );
    }
}
static void quat(double E[2][7]) {
    double u;
    double *q1 = E[0];
    double *q2 = E[1];
    u = aa_frand();

    {
        double qg[4], qa[4];
        aa_tf_qslerp( u, q1, q2, qg );
        aa_tf_qslerpalg( u, q1, q2, qa );
        aveq("slerp", 4, qg, qa, .001 );

        double dqg[4], dqa[4];
        aa_tf_qslerpdiff( u, q1, q2, dqg );
        aa_tf_qslerpdiffalg( u, q1, q2, dqa );
        aveq("slerpdiff", 4, dqg, dqa, .001 );
    }

    // mul
    {
        double Ql[16], Qr[16];
        double y0[4], y1[4], y2[4];
        aa_tf_qmatrix_l(q1, Ql, 4);
        aa_tf_qmatrix_r(q2, Qr, 4);
        aa_tf_qmul(q1,q2, y0);
        cblas_dgemv( CblasColMajor, CblasNoTrans, 4, 4,
                     1.0, Ql, 4, q2, 1, 0, y1, 1 );
        cblas_dgemv( CblasColMajor, CblasNoTrans, 4, 4,
                     1.0, Qr, 4, q1, 1, 0, y2, 1 );
        aveq( "qmul-1", 4, y0, y1, 1e-6 );
        aveq( "qmul-2", 4, y0, y2, 1e-6 );
    }

    // average
    {
        double qq[8], p[4], s[4];
        AA_MEM_CPY( qq, q1, 4 );
        AA_MEM_CPY( qq+4, q2, 4 );
        double w[2] = {.5,.5};
        aa_tf_quat_davenport( 2, w, qq, 4, p );
        aa_tf_qslerp( .5, q1, q2, s );
        aa_tf_qminimize( p );
        aa_tf_qminimize( s );
        aveq("davenport-2", 4, p, s, 1e-4 );
    }

    // relative rotation
    double R1[9], R2[9], Rr[9], qr[4], qrr[4];
    aa_tf_quat2rotmat(q1, R1);
    aa_tf_quat2rotmat(q2, R2);
    aa_tf_9rel( R1, R2, Rr );
    aa_tf_qrel( q1, q2, qr );
    aa_tf_rotmat2quat( Rr, qrr );
    aa_tf_qminimize( qr );
    aa_tf_qminimize( qrr );
    aveq("qrel", 4, qr, qrr, .001 );

    // minimize
    {
        double qmin[4], axang[4];
        aa_tf_qminimize2( q1, qmin );
        test( "quat-minimize", aa_feq( fabs(q1[3]), qmin[3], 0) );
        aa_tf_quat2axang( qmin, axang );
        test( "quat-minimize-angle", fabs(axang[3]) <= M_PI );
    }

    // mulc
    {
        double q1c[4], q2c[4], t1[4], t2[4];
        aa_tf_qconj(q1, q1c);
        aa_tf_qconj(q2, q2c);
        aa_tf_qmul(q1,q2c,t1);
        aa_tf_qmulc(q1,q2,t2);
        aveq("qmulc", 4, t1, t2, .001 );
        aa_tf_qmul(q1c,q2,t1);
        aa_tf_qcmul(q1,q2,t2);
        aveq("qcmul", 4, t1, t2, .001 );
    }

    // conj. props
    {
        // p*q = conj(conj(q) * conj(p))
        double c1[4], c2[4], c2c1[4], cc2c1[4], q1q2[4];
        aa_tf_qconj(q1,c1);
        aa_tf_qconj(q2,c2);
        aa_tf_qmul(c2,c1,c2c1);
        aa_tf_qmul(q1,q2,q1q2);
        aa_tf_qconj(c2c1,cc2c1);
        aveq("conjprop", 4, q1q2, cc2c1, .0001);
    }

    // exp
    {
        double q1e[4], q1eln[4];
        aa_tf_qexp(q1, q1e);
        aa_tf_qln(q1e, q1eln);
        aveq("exp-log", 4, q1, q1eln, .00001 );
        aa_tf_qln(q1, q1eln);
        aa_tf_qexp(q1eln, q1e);
        aveq("log-exp", 4, q1, q1e, .00001 );
    }

    // diff
    double w[3]={0}, dq[4], wdq[3];
    aa_vrand( 3, w );
    aa_tf_qvel2diff( q1, w, dq );
    aa_tf_qdiff2vel( q1, dq, wdq );
    aveq("qveldiff", 3, w, wdq, .000001);

    // integrate
    double qn_rk1[4], qn_vrk1[4], qn_vrk4[4], qn_vexp[4], qn_dq[4], w0[3] = {0};
    double dt = .02;
    aa_tf_qrk1( q1, dq, dt, qn_rk1 );
    aa_tf_qvelrk1( q1, w, dt, qn_vrk1 );
    aa_tf_qvelrk4( q1, w, dt, qn_vrk4 );
    aa_tf_qsvel( q1, w, dt, qn_vexp );
    aa_tf_qsdiff( q1, dq, dt, qn_dq );
    aveq("qvelrk1", 4, qn_rk1, qn_vrk1, .001 );
    aveq("qvelrk4", 4, qn_rk1, qn_vrk4, .001 );
    aveq("qvelexp", 4, qn_vrk4, qn_vexp, .0001);
    aveq("qvelsdiff", 4, qn_vexp, qn_dq, .001 );
    aa_tf_qsvel( q1, w0, dt, qn_vexp );
    aveq("qvelsvel0", 4, q1, qn_vexp, .000 );

    {
        double Rb[9], qR[4];
        aa_tf_qsvel( q1, w, dt, qn_vexp );
        aa_tf_rotmat_svel( R1, w, dt, Rb );
        aa_tf_rotmat2quat( Rb, qR );
        aa_tf_qminimize( qn_vexp);
        aa_tf_qminimize( qR );
        aveq("rotmat_svel", 4, qn_vexp, qR, 1e-4 );
    }

    // vectors
    {
        double *v0 = E[0] + AA_TF_QUTR_T;
        double *v1 = E[1] + AA_TF_QUTR_T;
        double q[4], vp[3];

        // identity case
        aa_tf_vecs2quat( v0, v0, q);
        aveq( "vecs2quat-ident", 4, q, aa_tf_quat_ident, 1e-6 );

        // regular case
        aa_tf_vecs2quat( v0, v1, q);
        aa_tf_qrot(q,v0,vp);
        // normalize result
        {
            double n0 = sqrt(v0[0]*v0[0] + v0[1]*v0[1] + v0[2]*v0[2] );
            double n1 = sqrt(v1[0]*v1[0] + v1[1]*v1[1] + v1[2]*v1[2] );
            double vp1[3];
            for( size_t i = 0; i < 3; i ++ ) {
                vp1[i] = n0*v1[i] / n1;
            }
            aveq("vecs2quat", 3, vp, vp1, 1e-6 );
        }

        // inverted case
        double v0n[3] = {-v0[0], -v0[1], -v0[2]};
        aa_tf_vecs2quat( v0, v0n, q);
        aa_tf_qrot(q,v0,vp);
        {
            double n0 = sqrt(v0[0]*v0[0] + v0[1]*v0[1] + v0[2]*v0[2] );
            double n1 = sqrt(v0n[0]*v0n[0] + v0n[1]*v0n[1] + v0n[2]*v0n[2] );
            double vp1[3];
            for( size_t i = 0; i < 3; i ++ ) {
                vp1[i] = n0*v0n[i] / n1;
            }
            aveq("vecs2quat-degenerate", 3, vp, vp1, 1e-6 );
        }
    }
}
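/*
 * Note on the vecs2quat checks above: the quaternion returned by
 * aa_tf_vecs2quat() rotates v0 onto the direction of v1 while preserving
 * the magnitude of v0, so the rotated vector is compared against v1
 * rescaled by |v0|/|v1|.  The final case exercises the degenerate
 * anti-parallel configuration (v1 = -v0).
 */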