/* Test that aa_tf_qslerpchaindiff() with the same unit quaternion at
 * both ends of the chain reproduces that quaternion (derivative inputs
 * are random and should not affect the interpolated value). */
static void slerp()
{
    double qr[4], qout[4];
    double d1[4], d2[4], dout[4];

    aa_tf_qurand(qr);               /* random unit quaternion */
    double s  = aa_frand();         /* interpolation parameter */
    double ds = aa_frand();         /* parameter rate */
    aa_vrand(4, d1);
    aa_vrand(4, d2);

    aa_tf_qslerpchaindiff(s, ds, qr, d1, qr, d2, qout, dout);

    /* Interpolating between q and q must yield q. */
    aveq("chaindiff equiv", 4, qr, qout, 1e-6);
}
/* Test that the direct angle-to-quaternion constructors for the x, y,
 * and z principal axes agree with converting the corresponding rotation
 * matrix to a quaternion.  Both results are sign-minimized before
 * comparison since q and -q represent the same rotation. */
static void theta2quat()
{
    double theta = (aa_frand() - 0.5) * 2 * M_PI;  /* random angle in (-pi, pi) */
    double qx[4], qy[4], qz[4];
    double Rx[9], Ry[9], Rz[9];
    double qRx[4], qRy[4], qRz[4];

    /* rotation matrices about each principal axis */
    aa_tf_xangle2rotmat( theta, Rx );
    aa_tf_yangle2rotmat( theta, Ry );
    aa_tf_zangle2rotmat( theta, Rz );

    /* direct quaternion constructors */
    aa_tf_xangle2quat( theta, qx );
    aa_tf_yangle2quat( theta, qy );
    aa_tf_zangle2quat( theta, qz );

    /* matrix -> quaternion reference path */
    aa_tf_rotmat2quat( Rx, qRx );
    aa_tf_rotmat2quat( Ry, qRy );
    aa_tf_rotmat2quat( Rz, qRz );

    /* canonicalize signs so q and -q compare equal */
    aa_tf_qminimize( qx );  aa_tf_qminimize( qRx );
    aa_tf_qminimize( qy );  aa_tf_qminimize( qRy );
    aa_tf_qminimize( qz );  aa_tf_qminimize( qRz );

    aveq("xangle2quat", 4, qx, qRx, 1e-6 );
    aveq("yangle2quat", 4, qy, qRy, 1e-6 );
    /* BUGFIX: label previously read "xangle2quat", misattributing a
     * z-axis failure to the x-axis test. */
    aveq("zangle2quat", 4, qz, qRz, 1e-6 );
}
/* Test velocity <-> derivative conversions and time integration across
 * the four transform representations, checking that they all agree.
 *
 * Parameters (assumed equivalent representations of one random pose —
 * produced elsewhere in this test file; TODO confirm against caller):
 *   E  - quaternion-translation (7 elements)
 *   S  - dual quaternion (8 elements)
 *   T  - 3x4 transformation matrix (12 elements, column-major)
 *   dx - 6-element spatial velocity (translational then rotational)
 */
static void integrate(const double *E, const double *S, const double *T, const double *dx) {
    const double *q = E+AA_TF_QUTR_Q;       /* rotation part of E */
    /* const double *v = E+AA_TF_QUTR_V; */
    /* const double *dv = dx+AA_TF_DX_V; */
    const double *w = dx+AA_TF_DX_W;        /* rotational velocity */
    const double *R = T+AA_TF_TFMAT_R;      /* rotation block of T */
    double dt = aa_frand() / 100;           /* small random time step */

    /* quaternion: vel -> diff -> vel must round-trip */
    { /* quaternion */
        double dq[4], wp[3];
        aa_tf_qvel2diff(q, w, dq);
        aa_tf_qdiff2vel(q,dq,wp);
        aveq( "qdiff<->vel", 3, w, wp, 1e-5 );
    }
    /* rotation matrix: same round-trip */
    { /* rotmat */
        double dR[9], wp[3];
        aa_tf_rotmat_vel2diff(R, w, dR);
        aa_tf_rotmat_diff2vel(R, dR, wp );
        aveq( "rotmat diff<->vel", 3, w, wp, 1e-5 );
    }
    /* quaternion-translation: full 6-dof round-trip */
    { /* quaternion translation */
        double dE[7], dxp[6];
        aa_tf_qutr_vel2diff( E, dx, dE );
        aa_tf_qutr_diff2vel( E, dE, dxp );
        aveq( "qutr diff<->vel", 6, dx, dxp, 1e-5 );
    }
    /* dual quaternion: full 6-dof round-trip */
    { /* dual quaternion */
        double dS[8], dxp[6];
        aa_tf_duqu_vel2diff( S, dx, dS );
        aa_tf_duqu_diff2vel( S, dS, dxp );
        aveq( "duqu diff<->vel", 6, dx, dxp, 1e-5 );
    }

    // integrate
    /* Integrate the same velocity over dt in each representation. */
    double S1[8], q1[4], T1[12], R1[9], E1[7];
    aa_tf_duqu_svel( S, dx, dt, S1 );
    aa_tf_qsvel( q, dx+3, dt, q1 );
    /* T's rotation block is at offset 0, so T doubles as R here */
    aa_tf_rotmat_svel( T, dx+3, dt, R1 );
    aa_tf_tfmat_svel( T, dx, dt, T1 );
    aa_tf_qutr_svel( E, dx, dt, E1 );

    // normalize
    /* Convert everything to (dual-)quaternion form and canonicalize
     * signs so results can be compared element-wise. */
    double R1q[4], T1q[8], E1q[8];
    aa_tf_rotmat2quat( R1, R1q );
    aa_tf_tfmat2duqu( T1, T1q );
    aa_tf_qutr2duqu(E1, E1q);
    aa_tf_duqu_minimize( S1 );
    aa_tf_duqu_minimize( T1q );
    aa_tf_duqu_minimize( E1q );
    aa_tf_qminimize( q1 );
    aa_tf_qminimize( R1q );

    // check
    aveq( "duqu-quat", 4, S, q, 0 );                 /* precondition: same rotation */
    aveq( "int-duqu-quat", 4, S1, q1, 1e-8 );
    aveq( "int-rotmat-quat", 4, R1q, q1, 1e-8 );
    aveq( "int-duqu-tfmat", 8, S1, T1q, 1e-6 );
    aveq( "int-qutr", 8, S1, E1q, 1e-6 );

    // normalized check
    /* After unit-normalization the agreement should tighten. */
    aa_tf_duqu_normalize( T1q );
    aa_tf_duqu_normalize( S1 );
    aveq( "int-duqu-tfmat-norm", 8, S1, T1q, 1e-7 );
}
/* Test dual-quaternion operations against the equivalent
 * quaternion-translation and matrix computations: multiplication
 * matrices, add/sub, point transformation, conjugation, velocity
 * derivatives/integration, exponential/logarithm, and pure translation. */
static void duqu() {
    // random tf
    aa_tf_tfmat_t T;      /* transformation-matrix view of the pose */
    aa_tf_duqu_t H;       /* dual-quaternion view of the same pose */
    double E[7];          /* quaternion-translation view */
    double S_ident[8] = AA_TF_DUQU_IDENT_INITIALIZER;
    double Q_ident[4] = AA_TF_QUAT_IDENT_INITIALIZER;
    double v_ident[3] = {0};
    double p0[3];         /* random test point */
    rand_tf( E, H.data, T.data );
    aa_vrand( 3, p0 );

    /* Algebraic checks on arbitrary (non-unit) dual quaternions. */
    {
        double A[8], B[8];
        aa_vrand(8,A);
        aa_vrand(8,B);
        // mul
        /* duqu product must match both the left- and right-multiplication
         * 8x8 matrix forms applied via BLAS. */
        {
            double A_L[8*8], B_R[8*8];
            double C[8], Cl[8], Cr[8];
            aa_tf_duqu_mul(A,B,C);
            aa_tf_duqu_matrix_l(A, A_L, 8);
            cblas_dgemv( CblasColMajor, CblasNoTrans, 8, 8, 1.0, A_L, 8, B, 1, 0, Cl, 1 );
            aveq( "duqu-mul-L", 8, C, Cl, 1e-6 );
            aa_tf_duqu_matrix_r(B, B_R, 8);
            cblas_dgemv( CblasColMajor, CblasNoTrans, 8, 8, 1.0, B_R, 8, A, 1, 0, Cr, 1 );
            aveq( "duqu-mul-R", 8, C, Cr, 1e-6 );
        }
        // add / sub
        /* A + B == A - (-B), and add/sub must act per real/dual part. */
        {
            double Ca[8], Cs[8], mB[8];
            for( size_t i = 0; i < 8; i ++ ) mB[i] = -B[i];
            aa_tf_duqu_add(A,B,Ca);
            aa_tf_duqu_sub(A,mB,Cs);
            aveq( "duqu-add-sub", 8, Ca, Cs, 1e-6 );
            double Cra[4], Crs[4];
            double Cda[4], Cds[4];
            aa_tf_duqu_sub(A,B,Cs);
            aa_tf_qadd(A+AA_TF_DUQU_REAL, B+AA_TF_DUQU_REAL,Cra);
            aa_tf_qadd(A+AA_TF_DUQU_DUAL, B+AA_TF_DUQU_DUAL,Cda);
            aa_tf_qsub(A+AA_TF_DUQU_REAL, B+AA_TF_DUQU_REAL,Crs);
            aa_tf_qsub(A+AA_TF_DUQU_DUAL, B+AA_TF_DUQU_DUAL,Cds);
            aveq( "duqu-qadd-real", 4, Cra, Ca+AA_TF_DUQU_REAL, 1e-6);
            aveq( "duqu-qadd-dual", 4, Cda, Ca+AA_TF_DUQU_DUAL, 1e-6);
            aveq( "duqu-qsub-real", 4, Crs, Cs+AA_TF_DUQU_REAL, 1e-6);
            aveq( "duqu-qsub-dual", 4, Cds, Cs+AA_TF_DUQU_DUAL, 1e-6);
        }
    }

    /* Historical setup kept for reference (pose now comes from rand_tf). */
    //double q[4], v[3], p0[3];
    //aa_vrand( 3, v );
    //aa_tf_qurand( q );
    //AA_MEM_SET( v, 0, 3 );
    // tfmat
    //aa_tf_quat2rotmat(q, T.R);
    //AA_MEM_CPY( &T.t.x, v, 3 );
    // dual quat
    //aa_tf_qv2duqu( q, v, H.data );
    //aa_tf_qv2duqu( aa_tf_quat_ident, v, H_tran.data );

    // check trans
    /* Translation extracted from the dual quaternion must match T's. */
    double hv[3];
    aa_tf_duqu_trans(H.data, hv);
    aveq("duqu-trans", 3, T.v.data, hv, .001 );
    //double nreal,ndual;
    //aa_tf_duqu_norm( H.data, &nreal, &ndual );
    //printf("norm: %f + %f \\epsilon \n", nreal, ndual );

    // transform points
    /* Same point through matrix, quaternion+vector, and dual quaternion. */
    double p1H[3], p1qv[3], p1T[3];
    aa_tf_12( T.data, p0, p1T );
    aa_tf_tf_qv( H.real.data, T.v.data, p0, p1qv );
    aa_tf_tf_duqu( H.data, p0, p1H );
    aveq( "tf-qv", 3, p1T, p1qv, .001 );
    aveq( "tf-duqu", 3, p1T, p1H, .001 );

    // conjugate
    /* Conjugates must agree across representations, and H * conj(H)
     * must be the identity transform. */
    {
        double S_conj[8];
        double qv_conj[7], E_conj[7];
        double SSc[8], EEc[7];
        double Scv[3];
        aa_tf_duqu_conj(H.data, S_conj);
        aa_tf_qv_conj(H.real.data, T.v.data, qv_conj, qv_conj+4);
        aa_tf_qutr_conj(E, E_conj);
        aa_tf_duqu_trans(S_conj, Scv);
        aveq( "duqu/qutr conj q", 4, S_conj, E_conj, 1e-6 );
        aveq( "duqu/qv conj q", 4, S_conj, qv_conj, 1e-6 );
        aveq( "duqu/qutr conj v", 3, Scv, E_conj+4, 1e-6 );
        aveq( "duqu/qv conj v", 3, Scv, qv_conj+4, 1e-6 );
        aa_tf_duqu_mul( H.data, S_conj, SSc );
        aa_tf_qv_chain( H.real.data, T.v.data, qv_conj, qv_conj+4, EEc, EEc+4 );
        aveq( "duqu conj", 8, SSc, S_ident, 1e-6 );
        aveq( "qv conj q", 4, EEc, Q_ident, 1e-6 );
        aveq( "qv conj v", 3, EEc+4, v_ident, 1e-6 );
    }

    // derivative
    /* Velocity <-> derivative round-trip plus three integration paths
     * (explicit Euler, svel, sdiff) that should agree. */
    {
        double dx[6], dd[8], dq[4];
        aa_vrand(6, dx);
        double dt = aa_frand() / 100;   /* small random step */
        aa_tf_duqu_vel2diff( H.data, dx, dd );
        aa_tf_qvel2diff( H.real.data, dx+3, dq );
        // back to velocity
        double dx1[6];
        aa_tf_duqu_diff2vel( H.data, dd, dx1 );
        aveq( "duqu-vel invert", 6, dx, dx1, .001 );
        // integrate
        double H1[8], q1[4], v1[3], H1qv[8];
        double H1_sdd[8], H1_sdx[8];
        for( size_t i = 0; i < 8; i ++ ) H1[i] = H.data[i] + dd[i]*dt; // some numerical error here...
        for( size_t i = 0; i < 3; i ++ ) v1[i] = T.v.data[i] + dx[i]*dt;
        aa_tf_duqu_normalize( H1 );
        aa_tf_qsvel( H.real.data, dx+3, dt, q1 );
        aa_tf_qv2duqu( q1, v1, H1qv );
        aveq( "duqu-vel_real", 4, dq, dd, .001 );
        aveq( "duqu-vel-int real", 4, H1, H1qv, .001 );
        aveq( "duqu-vel-int dual", 4, H1+4, H1qv+4, .001 );
        aa_tf_duqu_svel( H.data, dx, dt, H1_sdx );
        aa_tf_duqu_sdiff( H.data, dd, dt, H1_sdd );
        aveq( "duqu-int vel", 8, H1qv, H1_sdx, .001 );
        aveq( "duqu-int diff", 8, H1_sdx, H1_sdd, .0001 );
        /* // twist */
        /* twist <-> velocity round-trip */
        double tw[8], dxtw[6];
        aa_tf_duqu_vel2twist(H.data, dx, tw );
        aa_tf_duqu_twist2vel(H.data, tw, dxtw );
        aveq( "duqu twist<->vel", 6, dx, dxtw, 1e-6 );
    }

    // exponential
    /* exp and ln must be mutual inverses in both orders. */
    {
        double expd[8], lnexpd[8];
        aa_tf_duqu_exp(H.data, expd );
        aa_tf_duqu_ln( expd, lnexpd );
        aveq( "duqu-exp-ln", 8, H.data, lnexpd, .001 );
        aa_tf_duqu_ln( H.data, lnexpd );
        aa_tf_duqu_exp(lnexpd, expd );
        aveq( "duqu-ln-exp", 8, H.data, expd, .001 );
    }

    // Logarithm
    /* ln near/at the identity must give a (near-)zero twist. */
    {
        double HI[8], HIln[8], dxi[6], dx0[6] = {0};
        aa_tf_duqu_mulc( H.data, H.data, HI );   /* H * conj(H) ~ identity */
        aa_tf_duqu_ln(HI, HIln);
        aa_tf_duqu_twist2vel(HI, HIln, dxi );
        aveq( "duqu ln 0 near", 6, dx0, dxi, .0001 );
        aa_tf_duqu_ln(aa_tf_duqu_ident, HIln);
        aa_tf_duqu_twist2vel(HI, HIln, dxi );
        aveq( "duqu ln 0 exact", 6, dx0, dxi, 0.0 );
    }

    // Pure translation
    /* xyz constructor must give identity rotation and exact translation. */
    {
        double S[8], v[3], v1[3];
        aa_vrand(3,v);
        aa_tf_xyz2duqu( v[0], v[1], v[2], S );
        aa_tf_duqu_trans(S, v1);
        aveq( "duqu trans orientation", 4, S, aa_tf_quat_ident, 0.0 );
        aveq( "duqu trans translation", 3, v, v1, 1e-6 );
    }
}
static void quat(double E[2][7]) { double u; double *q1 = E[0]; double *q2 = E[0]; u = aa_frand(); { double qg[4], qa[4]; aa_tf_qslerp( u, q1, q2, qg ); aa_tf_qslerpalg( u, q1, q2, qa ); aveq("slerp", 4, qg, qa, .001 ); double dqg[4], dqa[4]; aa_tf_qslerpdiff( u, q1, q2, dqg ); aa_tf_qslerpdiffalg( u, q1, q2, dqa ); aveq("slerpdiff", 4, dqg, dqa, .001 ); } // mul { double Ql[16], Qr[16]; double y0[4], y1[4], y2[4]; aa_tf_qmatrix_l(q1, Ql, 4); aa_tf_qmatrix_r(q2, Qr, 4); aa_tf_qmul(q1,q2, y0); cblas_dgemv( CblasColMajor, CblasNoTrans, 4, 4, 1.0, Ql, 4, q2, 1, 0, y1, 1 ); cblas_dgemv( CblasColMajor, CblasNoTrans, 4, 4, 1.0, Qr, 4, q1, 1, 0, y2, 1 ); aveq( "qmul-1", 4, y0, y1, 1e-6 ); aveq( "qmul-2", 4, y0, y2, 1e-6 ); } // average { double qq[8], p[4], s[4]; AA_MEM_CPY( qq, q1, 4 ); AA_MEM_CPY( qq+4, q2, 4 ); double w[2] = {.5,.5}; aa_tf_quat_davenport( 2, w, qq, 4, p ); aa_tf_qslerp( .5, q1, q2, s ); aa_tf_qminimize( p ); aa_tf_qminimize( s ); aveq("davenport-2", 4, p, s, 1e-4 ); } double R1[9], R2[9], Rr[9], qr[4], qrr[4]; aa_tf_quat2rotmat(q1, R1); aa_tf_quat2rotmat(q2, R2); aa_tf_9rel( R1, R2, Rr ); aa_tf_qrel( q1, q2, qr ); aa_tf_rotmat2quat( Rr, qrr ); aa_tf_qminimize( qr ); aa_tf_qminimize( qrr ); aveq("qrel", 4, qr, qrr, .001 ); // minimize { double qmin[4], axang[4]; aa_tf_qminimize2( q1, qmin ); test( "quat-minimize", aa_feq( fabs(q1[3]), qmin[3], 0) ); aa_tf_quat2axang( qmin, axang ); test( "quat-minimize-angle", fabs(axang[3]) <= M_PI ); } // mulc { double q1c[4], q2c[4], t1[4], t2[4]; aa_tf_qconj(q1, q1c); aa_tf_qconj(q2, q2c); aa_tf_qmul(q1,q2c,t1); aa_tf_qmulc(q1,q2,t2); aveq("qmulc", 4, t1, t2, .001 ); aa_tf_qmul(q1c,q2,t1); aa_tf_qcmul(q1,q2,t2); aveq("qcmul", 4, t1, t2, .001 ); } // conj. 
props { // p*q = conj(conj(q) * conj(p)) double c1[4], c2[4], c2c1[4], cc2c1[4], q1q2[4]; aa_tf_qconj(q1,c1); aa_tf_qconj(q2,c2); aa_tf_qmul(c2,c1,c2c1); aa_tf_qmul(q1,q2,q1q2); aa_tf_qconj(c2c1,cc2c1); aveq("conjprop", 4, q1q2, cc2c1, .0001); } // exp { double q1e[4], q1eln[4]; aa_tf_qexp(q1, q1e); aa_tf_qln(q1e, q1eln); aveq("exp-log", 4, q1, q1eln, .00001 ); aa_tf_qln(q1, q1eln); aa_tf_qexp(q1eln, q1e); aveq("log-exp", 4, q1, q1e, .00001 ); } // diff double w[3]={0}, dq[4], wdq[3]; aa_vrand( 3, w ); aa_tf_qvel2diff( q1, w, dq ); aa_tf_qdiff2vel( q1, dq, wdq ); aveq("qveldiff", 3, w, wdq, .000001); // integrate double qn_rk1[4], qn_vrk1[4], qn_vrk4[4], qn_vexp[4], qn_dq[4], w0[3] = {0}; double dt = .02; aa_tf_qrk1( q1, dq, dt, qn_rk1 ); aa_tf_qvelrk1( q1, w, dt, qn_vrk1 ); aa_tf_qvelrk4( q1, w, dt, qn_vrk4 ); aa_tf_qsvel( q1, w, dt, qn_vexp ); aa_tf_qsdiff( q1, dq, dt, qn_dq ); aveq("qvelrk1", 4, qn_rk1, qn_vrk1, .001 ); aveq("qvelrk4", 4, qn_rk1, qn_vrk4, .001 ); aveq("qvelexp", 4, qn_vrk4, qn_vexp, .0001); aveq("qvelsdiff", 4, qn_vexp, qn_dq, .001 ); aa_tf_qsvel( q1, w0, dt, qn_vexp ); aveq("qvelsvel0", 4, q1, qn_vexp, .000 ); { double Rb[9], qR[4]; aa_tf_qsvel( q1, w, dt, qn_vexp ); aa_tf_rotmat_svel( R1, w, dt, Rb ); aa_tf_rotmat2quat( Rb, qR ); aa_tf_qminimize( qn_vexp); aa_tf_qminimize( qR ); aveq("rotmat_svel", 4, qn_vexp, qR, 1e-4 ); } // vectors { double *v0 = E[0] + AA_TF_QUTR_T; double *v1 = E[1] + AA_TF_QUTR_T; double q[4], vp[3]; // identify case aa_tf_vecs2quat( v0, v0, q); aveq( "vecs2quat-ident", 4, q, aa_tf_quat_ident, 1e-6 ); // regular case aa_tf_vecs2quat( v0, v1, q); aa_tf_qrot(q,v0,vp); // normalize result { double n0 = sqrt(v0[0]*v0[0] + v0[1]*v0[1] + v0[2]*v0[2] ); double n1 = sqrt(v1[0]*v1[0] + v1[1]*v1[1] + v1[2]*v1[2] ); double vp1[3]; for( size_t i = 0; i < 3; i ++ ) { vp1[i] = n0*v1[i] / n1; } aveq("vecs2quat", 3, vp, vp1, 1e-6 ); } // inverted case double v0n[3] = {-v0[0], -v0[1], -v0[2]}; aa_tf_vecs2quat( v0, v0n, q); 
aa_tf_qrot(q,v0,vp); { double n0 = sqrt(v0[0]*v0[0] + v0[1]*v0[1] + v0[2]*v0[2] ); double n1 = sqrt(v0n[0]*v0n[0] + v0n[1]*v0n[1] + v0n[2]*v0n[2] ); double vp1[3]; for( size_t i = 0; i < 3; i ++ ) { vp1[i] = n0*v0n[i] / n1; } aveq("vecs2quat-degenerate", 3, vp, vp1, 1e-6 ); } } }
/* Fill the n-element vector v with independent samples from aa_frand(). */
AA_API void aa_vrand(size_t n, double *v)
{
    double *end = v + n;
    while( v < end ) {
        *v++ = aa_frand();
    }
}