static void tfmat_inv(const double T[12])
{
    double Tc[12], Ti[12], Tr[12];

    // aa_tf_rotmat_inv1
    AA_MEM_CPY(Tc, T, 12);
    aa_tf_rotmat_inv1(Tc);
    aa_tf_rotmat_mul(T, Tc, Tr);
    aveq( "aa_tf_rotmat_inv1", 9, Tr, aa_tf_rotmat_ident, 1e-6 );

    // aa_tf_rotmat_inv2
    AA_MEM_CPY(Tc, T, 12);
    aa_tf_rotmat_inv2(Tc, Ti);
    aa_tf_rotmat_mul(T, Ti, Tr);
    aveq( "aa_tf_rotmat_inv2", 9, Tr, aa_tf_rotmat_ident, 1e-6 );

    // aa_tf_tfmat_inv1
    AA_MEM_CPY(Tc, T, 12);
    aa_tf_tfmat_inv1(Tc);
    aa_tf_tfmat_mul(T, Tc, Tr);
    aveq( "aa_tf_tfmat_inv1", 12, Tr, aa_tf_tfmat_ident, 1e-6 );

    // aa_tf_tfmat_inv2
    AA_MEM_CPY(Tc, T, 12);
    aa_tf_tfmat_inv2(Tc, Ti);
    aa_tf_tfmat_mul(T, Ti, Tr);
    aveq( "aa_tf_tfmat_inv2", 12, Tr, aa_tf_tfmat_ident, 1e-6 );
}
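/* A minimal caller-side sketch (hypothetical helper, not part of the test
 * suite above): invert a transform out-of-place with aa_tf_tfmat_inv2 and
 * verify the round trip, assuming the 12-double T stores the rotation block
 * in T[0..8] (column-major) and the translation in T[9..11]. */
static void example_tfmat_inv2_roundtrip( const double T[12] )
{
    double Tc[12], Ti[12], Tr[12];
    AA_MEM_CPY( Tc, T, 12 );
    aa_tf_tfmat_inv2( Tc, Ti );      /* Ti = inverse of T */
    aa_tf_tfmat_mul( Ti, T, Tr );    /* the inverse composed with T ... */
    aveq( "example-tfmat-roundtrip", 12, Tr, aa_tf_tfmat_ident, 1e-6 ); /* ... is identity */
}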
AA_API void aa_tf_qv_svel( const double q0[AA_RESTRICT 4], const double v0[AA_RESTRICT 3],
                           const double w[AA_RESTRICT 3], const double dv[AA_RESTRICT 3],
                           double dt,
                           double q1[AA_RESTRICT 4], double v1[AA_RESTRICT 3] )
{
    double dx[6], S0[8], S1[8];
    /* TODO: avoid the memcpy()s */
    AA_MEM_CPY( dx+AA_TF_DX_W, w, 3 );
    AA_MEM_CPY( dx+AA_TF_DX_V, dv, 3 );
    aa_tf_qv2duqu( q0, v0, S0 );
    aa_tf_duqu_svel( S0, dx, dt, S1 );
    aa_tf_duqu2qv( S1, q1, v1 );
}
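/* Usage sketch with hypothetical values: integrate the identity pose under a
 * constant twist for one 10 ms step.  Assumes the scalar-last quaternion
 * layout used by aa_tf_quat_ident, and that AA_TF_DX_W / AA_TF_DX_V select
 * the rotational and translational halves of the 6-vector, as in the body
 * above. */
static void example_qv_svel_step( void )
{
    double q0[4] = {0, 0, 0, 1};   /* identity rotation */
    double v0[3] = {0, 0, 0};      /* origin */
    double w[3]  = {0, 0, 1};      /* 1 rad/s about z */
    double dv[3] = {0.1, 0, 0};    /* 0.1 m/s along x */
    double q1[4], v1[3];
    aa_tf_qv_svel( q0, v0, w, dv, 1e-2, q1, v1 );
    /* q1, v1 now hold the pose after the screw-motion step */
}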
SceneFrame::SceneFrame(
    enum aa_rx_frame_type type_,
    const char *_parent, const char *_name,
    const double q[4], const double v[3] ) :
    type(type_),
    name(_name),
    parent(_parent),
    inertial(NULL)
{
    AA_MEM_CPY(E+AA_TF_QUTR_Q, q ? q : aa_tf_quat_ident, 4);
    AA_MEM_CPY(E+AA_TF_QUTR_V, v ? v : aa_tf_vec_ident, 3);
}
AA_API void aa_rx_sg_tf ( const struct aa_rx_sg *scene_graph,
                          size_t n_q, const double *q,
                          size_t n_tf,
                          double *TF_rel, size_t ld_rel,
                          double *TF_abs, size_t ld_abs )
{
    aa_rx_sg_ensure_clean_frames( scene_graph );
    amino::SceneGraph *sg = scene_graph->sg;
    size_t i_frame = 0;
    for( size_t i_rel = 0, i_abs = 0;
         i_frame < n_tf && i_frame < sg->frames.size();
         i_frame++, i_rel += ld_rel, i_abs += ld_abs )
    {
        amino::SceneFrame *f = sg->frames[(aa_rx_frame_id)i_frame];
        double *E_rel = TF_rel + i_rel;
        double *E_abs = TF_abs + i_abs;
        // compute relative
        f->tf_rel( q, E_rel );
        // chain to global
        if( f->in_global() ) {
            // TODO: can we somehow get rid of this branch?
            //       maybe a separate type for global frames
            AA_MEM_CPY(E_abs, E_rel, 7);
        } else {
            assert( f->parent_id < (ssize_t)i_frame );
            double *E_abs_parent = TF_abs + (ld_abs * f->parent_id);
            aa_tf_qutr_mul(E_abs_parent, E_rel, E_abs);
        }
    }
}
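/* Caller sketch (hypothetical scene graph and configuration vector): each
 * pose is a 7-double quaternion-translation entry, so a leading dimension of
 * 7 packs the output arrays densely.  aa_rx_sg_frame_count() and
 * aa_rx_sg_config_count() are assumed here to report the frame and
 * configuration counts of the scene graph. */
void example_sg_tf( const struct aa_rx_sg *sg, const double *q )
{
    size_t n_f = aa_rx_sg_frame_count( sg );
    size_t n_q = aa_rx_sg_config_count( sg );
    double *TF_rel = (double*)malloc( sizeof(double) * 7 * n_f );
    double *TF_abs = (double*)malloc( sizeof(double) * 7 * n_f );
    aa_rx_sg_tf( sg, n_q, q, n_f,
                 TF_rel, 7,
                 TF_abs, 7 );
    /* ... TF_abs + 7*i now holds the global pose of frame i ... */
    free( TF_rel );
    free( TF_abs );
}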
AA_API void aa_ct_state_clone(struct aa_mem_region *reg, struct aa_ct_state *dest,
                              struct aa_ct_state *src)
{
    dest->n_q = src->n_q;
    dest->n_tf = src->n_tf;

    if (src->q) {
        dest->q = AA_MEM_REGION_NEW_N(reg, double, src->n_q);
        AA_MEM_CPY(dest->q, src->q, src->n_q);
    }
void regress_1()
{
    aa_mem_region_t reg;
    aa_mem_region_init( &reg, 1024*32 );

    // Create list for way points
    struct rfx_trajx_point_list *plist = rfx_trajx_point_list_alloc( &reg );

    double r0[4] = {0,1,0,0};
    double rr[6][4];
    double rrel[4][4];
    double x0[3] = {0};
    aa_tf_zangle2quat(.5*M_PI, rrel[0]);
    aa_tf_eulerzyx2quat(0, 0, .10*M_PI, rrel[1]);
    aa_tf_eulerzyx2quat(0, 0, -.2*M_PI, rrel[2]);
    aa_tf_eulerzyx2quat(-.5*M_PI, 0, 0, rrel[3]);

    AA_MEM_CPY(rr[0], r0, 4);
    for( size_t i = 0; i < 4; i ++ ) {
        aa_tf_qmul( rr[i], rrel[i], rr[i+1] );
    }
    AA_MEM_CPY(rr[5], r0, 4);

    size_t diff = 2;
    for( size_t i = 0; i < sizeof(rr)/sizeof(rr[0])-diff; i ++ ) {
        rfx_trajx_point_list_addb_qv( plist, 5*(double)i, 1, rr[i+diff], x0 );
    }

    // generate trajectory
    struct rfx_trajx_seg_list *seglist =
        //rfx_trajx_splend_generate( plist, &reg );
        rfx_trajx_parablend_generate( plist, &reg );

    // plot trajectory
    rfx_trajx_seg_list_plot( seglist, .001, NULL );
}
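/* Region lifecycle sketch: every aa_mem_region_init() is meant to be paired
 * with an aa_mem_region_destroy() once the trajectory structures allocated
 * from the region are no longer needed (plot_viax below does this). */
void example_region_lifecycle( void )
{
    aa_mem_region_t reg;
    aa_mem_region_init( &reg, 1024*32 );
    /* ... allocate point lists and segment lists from &reg ... */
    aa_mem_region_destroy( &reg );
}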
SceneFrameJoint::SceneFrameJoint(
    enum aa_rx_frame_type type_,
    const char *_parent, const char *_name,
    const double q[4], const double v[3],
    const char *_config_name, double _offset, const double _axis[3] ) :
    SceneFrame( type_, _parent, _name, q, v ),
    config_name(_config_name ? _config_name : _name),
    offset(_offset)
{
    AA_MEM_CPY(this->axis, _axis, 3);
}
static void rand_tf( double _E[7], double S[8], double T[12] )
{
    double tmp[7];
    double *E = _E ? _E : tmp;

    /* Convert */
    aa_tf_qutr_rand( E );
    aa_tf_qutr2duqu( E, S );
    aa_tf_qutr2tfmat( E, T );

    /* Check Conversion */
    double E_min[7];
    AA_MEM_CPY( E_min, E, 7 );
    aa_tf_qminimize( E_min );
    {
        double Es[7], Et[7];
        aa_tf_tfmat2qutr( T, Et );
        aa_tf_duqu2qutr( S, Es );
        aa_tf_qminimize( Es );
        aa_tf_qminimize( Et );
        aveq( "qutr<->duqu", 7, E_min, Es, 1e-5 );
        aveq( "qutr<->tfmat", 7, E_min, Et, 1e-5 );
    }
}
AA_API void aa_tf_qutr_vel2diff( const double E[AA_RESTRICT 7], const double dx[AA_RESTRICT 6],
                                 double dE[AA_RESTRICT 7] )
{
    aa_tf_qvel2diff( E+AA_TF_QUTR_Q, dx+AA_TF_DX_W, dE+AA_TF_QUTR_Q );
    AA_MEM_CPY( dE+AA_TF_QUTR_V, dx+AA_TF_DX_V, 3 );
}
AA_API void aa_tf_qutr_diff2vel( const double E[AA_RESTRICT 7], const double dE[AA_RESTRICT 7],
                                 double dx[AA_RESTRICT 6] )
{
    aa_tf_qdiff2vel( E+AA_TF_QUTR_Q, dE+AA_TF_QUTR_Q, dx+AA_TF_DX_W );
    AA_MEM_CPY( dx+AA_TF_DX_V, dE+AA_TF_QUTR_V, 3 );
}
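/* Round-trip sketch (hypothetical pose E and twist dx): mapping a velocity to
 * a pose derivative and back should recover the original 6-vector, since the
 * two functions above are inverses on the rotational part and plain copies on
 * the translational part. */
static void example_qutr_vel_roundtrip( const double E[7], const double dx[6] )
{
    double dE[7], dx1[6];
    aa_tf_qutr_vel2diff( E, dx, dE );
    aa_tf_qutr_diff2vel( E, dE, dx1 );
    /* dx1 should match dx up to numerical tolerance */
}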
static void quat(double E[2][7])
{
    double u;
    double *q1 = E[0];
    double *q2 = E[1];

    u = aa_frand();

    {
        double qg[4], qa[4];
        aa_tf_qslerp( u, q1, q2, qg );
        aa_tf_qslerpalg( u, q1, q2, qa );
        aveq("slerp", 4, qg, qa, .001 );

        double dqg[4], dqa[4];
        aa_tf_qslerpdiff( u, q1, q2, dqg );
        aa_tf_qslerpdiffalg( u, q1, q2, dqa );
        aveq("slerpdiff", 4, dqg, dqa, .001 );
    }

    // mul
    {
        double Ql[16], Qr[16];
        double y0[4], y1[4], y2[4];
        aa_tf_qmatrix_l(q1, Ql, 4);
        aa_tf_qmatrix_r(q2, Qr, 4);
        aa_tf_qmul(q1, q2, y0);
        cblas_dgemv( CblasColMajor, CblasNoTrans, 4, 4,
                     1.0, Ql, 4, q2, 1, 0, y1, 1 );
        cblas_dgemv( CblasColMajor, CblasNoTrans, 4, 4,
                     1.0, Qr, 4, q1, 1, 0, y2, 1 );
        aveq( "qmul-1", 4, y0, y1, 1e-6 );
        aveq( "qmul-2", 4, y0, y2, 1e-6 );
    }

    // average
    {
        double qq[8], p[4], s[4];
        AA_MEM_CPY( qq, q1, 4 );
        AA_MEM_CPY( qq+4, q2, 4 );
        double w[2] = {.5, .5};
        aa_tf_quat_davenport( 2, w, qq, 4, p );
        aa_tf_qslerp( .5, q1, q2, s );
        aa_tf_qminimize( p );
        aa_tf_qminimize( s );
        aveq("davenport-2", 4, p, s, 1e-4 );
    }

    double R1[9], R2[9], Rr[9], qr[4], qrr[4];
    aa_tf_quat2rotmat(q1, R1);
    aa_tf_quat2rotmat(q2, R2);
    aa_tf_9rel( R1, R2, Rr );
    aa_tf_qrel( q1, q2, qr );
    aa_tf_rotmat2quat( Rr, qrr );
    aa_tf_qminimize( qr );
    aa_tf_qminimize( qrr );
    aveq("qrel", 4, qr, qrr, .001 );

    // minimize
    {
        double qmin[4], axang[4];
        aa_tf_qminimize2( q1, qmin );
        test( "quat-minimize", aa_feq( fabs(q1[3]), qmin[3], 0) );
        aa_tf_quat2axang( qmin, axang );
        test( "quat-minimize-angle", fabs(axang[3]) <= M_PI );
    }

    // mulc
    {
        double q1c[4], q2c[4], t1[4], t2[4];
        aa_tf_qconj(q1, q1c);
        aa_tf_qconj(q2, q2c);
        aa_tf_qmul(q1, q2c, t1);
        aa_tf_qmulc(q1, q2, t2);
        aveq("qmulc", 4, t1, t2, .001 );
        aa_tf_qmul(q1c, q2, t1);
        aa_tf_qcmul(q1, q2, t2);
        aveq("qcmul", 4, t1, t2, .001 );
    }

    // conj. props
    {
        // p*q = conj(conj(q) * conj(p))
        double c1[4], c2[4], c2c1[4], cc2c1[4], q1q2[4];
        aa_tf_qconj(q1, c1);
        aa_tf_qconj(q2, c2);
        aa_tf_qmul(c2, c1, c2c1);
        aa_tf_qmul(q1, q2, q1q2);
        aa_tf_qconj(c2c1, cc2c1);
        aveq("conjprop", 4, q1q2, cc2c1, .0001);
    }

    // exp
    {
        double q1e[4], q1eln[4];
        aa_tf_qexp(q1, q1e);
        aa_tf_qln(q1e, q1eln);
        aveq("exp-log", 4, q1, q1eln, .00001 );
        aa_tf_qln(q1, q1eln);
        aa_tf_qexp(q1eln, q1e);
        aveq("log-exp", 4, q1, q1e, .00001 );
    }

    // diff
    double w[3] = {0}, dq[4], wdq[3];
    aa_vrand( 3, w );
    aa_tf_qvel2diff( q1, w, dq );
    aa_tf_qdiff2vel( q1, dq, wdq );
    aveq("qveldiff", 3, w, wdq, .000001);

    // integrate
    double qn_rk1[4], qn_vrk1[4], qn_vrk4[4], qn_vexp[4], qn_dq[4], w0[3] = {0};
    double dt = .02;
    aa_tf_qrk1( q1, dq, dt, qn_rk1 );
    aa_tf_qvelrk1( q1, w, dt, qn_vrk1 );
    aa_tf_qvelrk4( q1, w, dt, qn_vrk4 );
    aa_tf_qsvel( q1, w, dt, qn_vexp );
    aa_tf_qsdiff( q1, dq, dt, qn_dq );
    aveq("qvelrk1", 4, qn_rk1, qn_vrk1, .001 );
    aveq("qvelrk4", 4, qn_rk1, qn_vrk4, .001 );
    aveq("qvelexp", 4, qn_vrk4, qn_vexp, .0001);
    aveq("qvelsdiff", 4, qn_vexp, qn_dq, .001 );
    aa_tf_qsvel( q1, w0, dt, qn_vexp );
    aveq("qvelsvel0", 4, q1, qn_vexp, .000 );

    {
        double Rb[9], qR[4];
        aa_tf_qsvel( q1, w, dt, qn_vexp );
        aa_tf_rotmat_svel( R1, w, dt, Rb );
        aa_tf_rotmat2quat( Rb, qR );
        aa_tf_qminimize( qn_vexp );
        aa_tf_qminimize( qR );
        aveq("rotmat_svel", 4, qn_vexp, qR, 1e-4 );
    }

    // vectors
    {
        double *v0 = E[0] + AA_TF_QUTR_T;
        double *v1 = E[1] + AA_TF_QUTR_T;
        double q[4], vp[3];

        // identity case
        aa_tf_vecs2quat( v0, v0, q );
        aveq( "vecs2quat-ident", 4, q, aa_tf_quat_ident, 1e-6 );

        // regular case
        aa_tf_vecs2quat( v0, v1, q );
        aa_tf_qrot(q, v0, vp);
        // normalize result
        {
            double n0 = sqrt(v0[0]*v0[0] + v0[1]*v0[1] + v0[2]*v0[2] );
            double n1 = sqrt(v1[0]*v1[0] + v1[1]*v1[1] + v1[2]*v1[2] );
            double vp1[3];
            for( size_t i = 0; i < 3; i ++ ) {
                vp1[i] = n0*v1[i] / n1;
            }
            aveq("vecs2quat", 3, vp, vp1, 1e-6 );
        }

        // inverted case
        double v0n[3] = {-v0[0], -v0[1], -v0[2]};
        aa_tf_vecs2quat( v0, v0n, q );
        aa_tf_qrot(q, v0, vp);
        {
            double n0 = sqrt(v0[0]*v0[0] + v0[1]*v0[1] + v0[2]*v0[2] );
            double n1 = sqrt(v0n[0]*v0n[0] + v0n[1]*v0n[1] + v0n[2]*v0n[2] );
            double vp1[3];
            for( size_t i = 0; i < 3; i ++ ) {
                vp1[i] = n0*v0n[i] / n1;
            }
            aveq("vecs2quat-degenerate", 3, vp, vp1, 1e-6 );
        }
    }
}
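/* Two quick identities as a sketch alongside the tests above, using only
 * functions already exercised there: conjugation is an involution, and
 * rotating by the identity quaternion leaves a vector unchanged. */
static void example_quat_identities( const double q[4], const double v[3] )
{
    double qc[4], qcc[4];
    aa_tf_qconj( q, qc );
    aa_tf_qconj( qc, qcc );
    aveq( "example-conj-involution", 4, q, qcc, 1e-12 );

    double vp[3];
    aa_tf_qrot( aa_tf_quat_ident, v, vp );
    aveq( "example-ident-rotate", 3, v, vp, 1e-12 );
}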
void plot_viax()
{
    aa_mem_region_t reg;
    aa_mem_region_init( &reg, 1024*32 );

    struct rfx_trajx_point_list *plist = rfx_trajx_point_list_alloc( &reg );

    double theta = M_PI*.9;
    //double X[5][5] = { {0,0,0}, {1,0,0}, {1,1,0}, {1,1,1}, {0,0,0} };
    //double E[5][5] = { {0,0,0}, {M_PI_2,0,0}, {M_PI_2,M_PI_2,0}, {M_PI_2,M_PI_2,M_PI_2}, {0,0,0} };
    double X[2][3] = { {0,0,0}, {0,0,0} };
    double E[2][3] = { {theta,0,0}, {theta,theta,0} };
    double R[5][4];
    double RV[5][3];
    size_t n = 2;
    for( size_t i = 0; i < n; i ++ ) {
        aa_tf_eulerzyx2quat( E[i][0], E[i][1], E[i][2], R[i] );
        aa_tf_quat2rotvec( R[i], RV[i] );
        rfx_trajx_point_list_addb_qv( plist, 5*(double)i, 1, R[i], X[i] );
    }

    //rfx_trajx_generate( pT );
    //rfx_trajx_plot( pT, .001, NULL );

    struct rfx_trajx_seg_list *seglist =
        //rfx_trajx_splend_generate( plist, &reg );
        rfx_trajx_parablend_generate( plist, &reg );
    rfx_trajx_seg_list_plot( seglist, .001, NULL );

    return;

    struct rfx_trajx_seg_list *testlist = rfx_trajx_seg_list_alloc( &reg );
    {
        double x_i[6], x_f[6];
        AA_MEM_CPY(x_i, X[0], 3 );
        AA_MEM_CPY(x_i+3, RV[0], 3 );
        AA_MEM_CPY(x_f, X[1], 3 );
        AA_MEM_CPY(x_f+3, RV[1], 3 );
        struct rfx_trajx_seg *test =
            rfx_trajx_seg_lerp_rv_alloc( &reg, 0, 1, 0, x_i, 1, x_f );
        rfx_trajx_seg_list_add( testlist, test );
    }

    struct rfx_trajx_seg_list *testlist2 = rfx_trajx_seg_list_alloc( &reg );
    {
        struct rfx_trajx_seg *test =
            rfx_trajx_seg_lerp_slerp_alloc( &reg, 0, 1, 0, X[0], R[0],
                                            1, X[1], R[1] );
        rfx_trajx_seg_list_add( testlist2, test );
    }

    rfx_trajx_seg_list_plot( testlist, .001, NULL );

    aa_mem_region_destroy( &reg );
}
AA_API void aa_rx_sg_tf_update ( const struct aa_rx_sg *scene_graph,
                                 size_t n_q,
                                 const double *q0, const double *q,
                                 size_t n_tf,
                                 const double *TF_rel0, size_t ld_rel0,
                                 const double *TF_abs0, size_t ld_abs0,
                                 double *TF_rel, size_t ld_rel,
                                 double *TF_abs, size_t ld_abs )
{
    aa_rx_sg_ensure_clean_frames( scene_graph );
    amino::SceneGraph *sg = scene_graph->sg;
    bool updated[sg->frames.size()];
    size_t i_frame = 0;
    for( size_t i_rel0 = 0, i_abs0 = 0, i_rel = 0, i_abs = 0;
         i_frame < n_tf && i_frame < sg->frames.size();
         i_frame++,
             i_rel += ld_rel, i_abs += ld_abs,
             i_rel0 += ld_rel0, i_abs0 += ld_abs0 )
    {
        amino::SceneFrame *f = sg->frames[(aa_rx_frame_id)i_frame];
        enum aa_rx_frame_type type = f->type;
        bool update_abs = 0;
        bool in_global = f->in_global();
        const double *E_rel0 = TF_rel0 + i_rel0;
        const double *E_abs0 = TF_abs0 + i_abs0;
        double *E_rel = TF_rel + i_rel;
        double *E_abs = TF_abs + i_abs;
        switch( type ) {
        case AA_RX_FRAME_FIXED:
            f->tf_rel(q, E_rel);
            update_abs = !in_global && updated[f->parent_id];
            break;
        case AA_RX_FRAME_REVOLUTE:
        case AA_RX_FRAME_PRISMATIC: {
            amino::SceneFrameJoint *fj = static_cast<amino::SceneFrameJoint*>(f);
            if( aa_feq(q0[fj->config_index], q[fj->config_index], 0) ) {
                AA_MEM_CPY(E_rel, E_rel0, 7);
                update_abs = !in_global && updated[f->parent_id];
            } else {
                f->tf_rel(q, E_rel);
                update_abs = 1;
            }
            break;
        }
        }
        if( update_abs ) {
            // chain to global
            if( in_global ) {
                // TODO: can we somehow get rid of this branch?
                //       maybe a separate type for global frames
                AA_MEM_CPY(E_abs, E_rel, 7);
            } else {
                assert( f->parent_id < (ssize_t)i_frame );
                double *E_abs_parent = TF_abs + (ld_abs * f->parent_id);
                aa_tf_qutr_mul(E_abs_parent, E_rel, E_abs);
            }
            updated[i_frame] = 1;
        } else {
            AA_MEM_CPY(E_abs, E_abs0, 7);
            updated[i_frame] = 0;
        }
    }
}
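/* Caller sketch (hypothetical arrays): after a full pass with aa_rx_sg_tf,
 * later configurations can be processed with aa_rx_sg_tf_update, passing the
 * previous configuration and pose arrays so that subtrees whose joint values
 * did not change are copied rather than recomputed.  Leading dimension 7
 * packs the qutr poses densely, as in the sketch after aa_rx_sg_tf above. */
void example_sg_tf_update( const struct aa_rx_sg *sg,
                           size_t n_q, const double *q0, const double *q1,
                           size_t n_f,
                           const double *TF_rel0, const double *TF_abs0,
                           double *TF_rel1, double *TF_abs1 )
{
    aa_rx_sg_tf_update( sg, n_q, q0, q1, n_f,
                        TF_rel0, 7, TF_abs0, 7,
                        TF_rel1, 7, TF_abs1, 7 );
}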
AA_API int aa_rx_mp_plan( struct aa_rx_mp *mp,
                          double timeout,
                          size_t *n_path,
                          double **p_path_all )
{
    mp->validity_checker->allow();
    amino::sgSpaceInformation::Ptr &si = mp->space_information;

    /* Configure State Validity Checker */
    /* Setup Space */
    *n_path = 0;
    *p_path_all = NULL;

    amino::sgStateSpace *ss = si->getTypedStateSpace();
    ompl::base::ProblemDefinitionPtr &pdef = mp->problem_definition;
    ompl::base::PlannerPtr planner =
        (NULL == mp->planner.get())
        ? ompl::base::PlannerPtr(new ompl::geometric::RRTConnect(si))
        : mp->planner;
    planner->setProblemDefinition(pdef);

    try {
        if( mp->lazy_samples ) {
            fprintf(stderr, "Starting sampling thread\n");
            mp->lazy_samples->clear();
            mp->lazy_samples->setStart(ss->config_count_all(), mp->config_start);
            mp->lazy_samples->startSampling();
        }

        planner->solve(timeout);

        if( mp->lazy_samples ) {
            fprintf(stderr, "Stopping sampling thread\n");
            mp->lazy_samples->stopSampling();
        }
    } catch(...) {
        return AA_RX_NO_SOLUTION;
    }

    if( pdef->hasSolution() ) {
        const ompl::base::PathPtr &path_ptr = pdef->getSolutionPath();
        ompl::geometric::PathGeometric &path =
            static_cast<ompl::geometric::PathGeometric&>(*path_ptr);
        path_cleanup(mp, path);

        /* Allocate a simple array */
        *n_path = path.getStateCount();
        *p_path_all = (double*)calloc( *n_path * ss->config_count_all(),
                                       sizeof(double) );

        /* Fill array */
        std::vector< ompl::base::State *> &states = path.getStates();
        double *ptr = *p_path_all;
        for( auto itr = states.begin();
             itr != states.end();
             itr++, ptr += ss->config_count_all() )
        {
            AA_MEM_CPY( ptr, mp->config_start, ss->config_count_all() );
            amino::sgSpaceInformation::StateType *state =
                amino::sgSpaceInformation::state_as(*itr);
            ss->insert_state( state, ptr );
        }
        return AA_RX_OK;
    } else {
        return AA_RX_NO_SOLUTION | AA_RX_NO_MP;
    }
}
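/* Caller sketch (hypothetical planner handle): run the planner with a 5 s
 * timeout and walk the returned waypoints.  The waypoint array is allocated
 * with calloc() above, so the caller owns and frees it; each waypoint is
 * assumed to hold one value per scene-graph configuration (n_config). */
int example_plan( struct aa_rx_mp *mp, size_t n_config )
{
    size_t n_path = 0;
    double *path = NULL;
    int r = aa_rx_mp_plan( mp, 5.0, &n_path, &path );
    if( AA_RX_OK == r ) {
        for( size_t i = 0; i < n_path; i++ ) {
            const double *waypoint = path + i * n_config;
            (void)waypoint;   /* e.g., hand each waypoint to a trajectory generator */
        }
    }
    free( path );
    return r;
}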
void SceneFrameFixed::tf_rel( const double *q, double _E[7] )
{
    (void)q;
    AA_MEM_CPY(_E, this->E, 7);
}