void ahrs_propagate(void) { /* unbias gyro */ struct Int32Rates omega; RATES_DIFF(omega, imu.gyro_prev, ahrs_impl.gyro_bias); /* low pass rate */ #ifdef AHRS_PROPAGATE_LOW_PASS_RATES RATES_SMUL(ahrs.imu_rate, ahrs.imu_rate,2); RATES_ADD(ahrs.imu_rate, omega); RATES_SDIV(ahrs.imu_rate, ahrs.imu_rate, 3); #else RATES_COPY(ahrs.imu_rate, omega); #endif /* add correction */ RATES_ADD(omega, ahrs_impl.rate_correction); /* and zeros it */ INT_RATES_ZERO(ahrs_impl.rate_correction); /* integrate quaternion */ INT32_QUAT_INTEGRATE_FI(ahrs.ltp_to_imu_quat, ahrs_impl.high_rez_quat, omega, AHRS_PROPAGATE_FREQUENCY); INT32_QUAT_NORMALIZE(ahrs.ltp_to_imu_quat); compute_imu_euler_and_rmat_from_quat(); compute_body_orientation(); }
/**
 * Propagate the attitude estimate with a new gyro measurement.
 *
 * @param gyro unscaled gyro rates (fixed point, IMU frame)
 * @param dt   time step in seconds since the previous propagation
 */
void ahrs_icq_propagate(struct Int32Rates *gyro, float dt)
{
  /* Round 1/dt to the nearest integer frequency: plain truncation could
   * return e.g. 511 instead of 512 when dt was itself computed as a
   * float reciprocal with rounding error. */
  int32_t freq = (int32_t)(1.f / dt + 0.5f);

  /* unbias gyro */
  struct Int32Rates omega;
  RATES_DIFF(omega, *gyro, ahrs_icq.gyro_bias);

#ifdef AHRS_PROPAGATE_LOW_PASS_RATES
  /* low pass rate: imu_rate = (2 * imu_rate + omega) / 3 */
  RATES_SMUL(ahrs_icq.imu_rate, ahrs_icq.imu_rate, 2);
  RATES_ADD(ahrs_icq.imu_rate, omega);
  RATES_SDIV(ahrs_icq.imu_rate, ahrs_icq.imu_rate, 3);
#else
  RATES_COPY(ahrs_icq.imu_rate, omega);
#endif

  /* add correction and zero it for the next cycle */
  RATES_ADD(omega, ahrs_icq.rate_correction);
  INT_RATES_ZERO(ahrs_icq.rate_correction);

  /* integrate quaternion and renormalize */
  int32_quat_integrate_fi(&ahrs_icq.ltp_to_imu_quat, &ahrs_icq.high_rez_quat, &omega, freq);
  int32_quat_normalize(&ahrs_icq.ltp_to_imu_quat);

  /* increase accel and mag propagation counters */
  ahrs_icq.accel_cnt++;
  ahrs_icq.mag_cnt++;
}
void ahrs_propagate(void) { /* unbias gyro */ struct Int32Rates omega; RATES_DIFF(omega, imu.gyro_prev, ahrs_impl.gyro_bias); /* low pass rate */ //#ifdef AHRS_PROPAGATE_LOW_PASS_RATES if (gyro_lowpass_filter > 1) { RATES_SMUL(ahrs_impl.imu_rate, ahrs_impl.imu_rate, gyro_lowpass_filter-1); RATES_ADD(ahrs_impl.imu_rate, omega); RATES_SDIV(ahrs_impl.imu_rate, ahrs_impl.imu_rate, gyro_lowpass_filter); //#else } else { RATES_COPY(ahrs_impl.imu_rate, omega); //#endif } /* add correction */ RATES_ADD(omega, ahrs_impl.rate_correction); /* and zeros it */ INT_RATES_ZERO(ahrs_impl.rate_correction); /* integrate quaternion */ INT32_QUAT_INTEGRATE_FI(ahrs_impl.ltp_to_imu_quat, ahrs_impl.high_rez_quat, omega, AHRS_PROPAGATE_FREQUENCY); INT32_QUAT_NORMALIZE(ahrs_impl.ltp_to_imu_quat); set_body_state_from_quat(); }
void stabilization_attitude_ref_update() { #if USE_REF /* dumb integrate reference attitude */ struct FloatRates delta_rate; RATES_SMUL(delta_rate, stab_att_ref_rate, DT_UPDATE); struct FloatEulers delta_angle; EULERS_ASSIGN(delta_angle, delta_rate.p, delta_rate.q, delta_rate.r); EULERS_ADD(stab_att_ref_euler, delta_angle); FLOAT_ANGLE_NORMALIZE(stab_att_ref_euler.psi); /* integrate reference rotational speeds */ struct FloatRates delta_accel; RATES_SMUL(delta_accel, stab_att_ref_accel, DT_UPDATE); RATES_ADD(stab_att_ref_rate, delta_accel); /* compute reference attitude error */ struct FloatEulers ref_err; EULERS_DIFF(ref_err, stab_att_ref_euler, stab_att_sp_euler); /* wrap it in the shortest direction */ FLOAT_ANGLE_NORMALIZE(ref_err.psi); /* compute reference angular accelerations */ stab_att_ref_accel.p = -2.*ZETA_P * OMEGA_P * stab_att_ref_rate.p - OMEGA_P * OMEGA_P * ref_err.phi; stab_att_ref_accel.q = -2.*ZETA_Q * OMEGA_P * stab_att_ref_rate.q - OMEGA_Q * OMEGA_Q * ref_err.theta; stab_att_ref_accel.r = -2.*ZETA_R * OMEGA_P * stab_att_ref_rate.r - OMEGA_R * OMEGA_R * ref_err.psi; /* saturate acceleration */ const struct FloatRates MIN_ACCEL = { -REF_ACCEL_MAX_P, -REF_ACCEL_MAX_Q, -REF_ACCEL_MAX_R }; const struct FloatRates MAX_ACCEL = { REF_ACCEL_MAX_P, REF_ACCEL_MAX_Q, REF_ACCEL_MAX_R }; RATES_BOUND_BOX(stab_att_ref_accel, MIN_ACCEL, MAX_ACCEL); /* saturate speed and trim accel accordingly */ SATURATE_SPEED_TRIM_ACCEL(); #else /* !USE_REF */ EULERS_COPY(stab_att_ref_euler, stabilization_att_sp); FLOAT_RATES_ZERO(stab_att_ref_rate); FLOAT_RATES_ZERO(stab_att_ref_accel); #endif /* USE_REF */ }
void stabilization_attitude_ref_update(void) { /* integrate reference attitude */ #if STABILIZATION_ATTITUDE_REF_QUAT_INFINITESIMAL_STEP struct FloatQuat qdot; FLOAT_QUAT_DERIVATIVE(qdot, stab_att_ref_rate, stab_att_ref_quat); QUAT_SMUL(qdot, qdot, DT_UPDATE); QUAT_ADD(stab_att_ref_quat, qdot); #else // use finite step (involves trig) struct FloatQuat delta_q; FLOAT_QUAT_DIFFERENTIAL(delta_q, stab_att_ref_rate, DT_UPDATE); /* compose new ref_quat by quaternion multiplication of delta rotation and current ref_quat */ struct FloatQuat new_ref_quat; FLOAT_QUAT_COMP(new_ref_quat, delta_q, stab_att_ref_quat); QUAT_COPY(stab_att_ref_quat, new_ref_quat); #endif FLOAT_QUAT_NORMALIZE(stab_att_ref_quat); /* integrate reference rotational speeds */ struct FloatRates delta_rate; RATES_SMUL(delta_rate, stab_att_ref_accel, DT_UPDATE); RATES_ADD(stab_att_ref_rate, delta_rate); /* compute reference angular accelerations */ struct FloatQuat err; /* compute reference attitude error */ FLOAT_QUAT_INV_COMP(err, stab_att_sp_quat, stab_att_ref_quat); /* wrap it in the shortest direction */ FLOAT_QUAT_WRAP_SHORTEST(err); /* propagate the 2nd order linear model */ stab_att_ref_accel.p = -2.*stab_att_ref_model[ref_idx].zeta.p*stab_att_ref_model[ref_idx].omega.p*stab_att_ref_rate.p - stab_att_ref_model[ref_idx].omega.p*stab_att_ref_model[ref_idx].omega.p*err.qx; stab_att_ref_accel.q = -2.*stab_att_ref_model[ref_idx].zeta.q*stab_att_ref_model[ref_idx].omega.q*stab_att_ref_rate.q - stab_att_ref_model[ref_idx].omega.q*stab_att_ref_model[ref_idx].omega.q*err.qy; stab_att_ref_accel.r = -2.*stab_att_ref_model[ref_idx].zeta.r*stab_att_ref_model[ref_idx].omega.r*stab_att_ref_rate.r - stab_att_ref_model[ref_idx].omega.r*stab_att_ref_model[ref_idx].omega.r*err.qz; /* saturate acceleration */ const struct FloatRates MIN_ACCEL = { -REF_ACCEL_MAX_P, -REF_ACCEL_MAX_Q, -REF_ACCEL_MAX_R }; const struct FloatRates MAX_ACCEL = { REF_ACCEL_MAX_P, REF_ACCEL_MAX_Q, REF_ACCEL_MAX_R }; 
RATES_BOUND_BOX(stab_att_ref_accel, MIN_ACCEL, MAX_ACCEL); /* saturate angular speed and trim accel accordingly */ SATURATE_SPEED_TRIM_ACCEL(); /* compute ref_euler */ FLOAT_EULERS_OF_QUAT(stab_att_ref_euler, stab_att_ref_quat); }
static void send_att_indi(struct transport_tx *trans, struct link_device *dev) { //The estimated G values are scaled, so scale them back before sending struct FloatRates g_est_disp; RATES_SMUL(g_est_disp, g_est, INDI_EST_SCALE); float g2_est_disp = g2_est * INDI_EST_SCALE; pprz_msg_send_STAB_ATTITUDE_INDI(trans, dev, AC_ID, &indi.filtered_rate_deriv.p, &indi.filtered_rate_deriv.q, &indi.filtered_rate_deriv.r, &indi.angular_accel_ref.p, &indi.angular_accel_ref.q, &indi.angular_accel_ref.r, &g_est_disp.p, &g_est_disp.q, &g_est_disp.r, &g2_est_disp); }
static void send_att_indi(struct transport_tx *trans, struct link_device *dev) { //The estimated G values are scaled, so scale them back before sending struct FloatRates g1_disp; RATES_SMUL(g1_disp, indi.est.g1, INDI_EST_SCALE); float g2_disp = indi.est.g2 * INDI_EST_SCALE; pprz_msg_send_STAB_ATTITUDE_INDI(trans, dev, AC_ID, &indi.rate.dx.p, &indi.rate.dx.q, &indi.rate.dx.r, &indi.angular_accel_ref.p, &indi.angular_accel_ref.q, &indi.angular_accel_ref.r, &g1_disp.p, &g1_disp.q, &g1_disp.r, &g2_disp); }
/* Periodic IMU task: (re)configure sensors until initialized, then average
 * the accumulated raw samples, remap axes into the board frame, apply
 * optional median and running-average filters, and mark the data valid. */
void imu_periodic( void ) {
  // Start reading the latest gyroscope data
  if (!imu_krooz.mpu.config.initialized) mpu60x0_i2c_start_configure(&imu_krooz.mpu);
  if (!imu_krooz.hmc.initialized) hmc58xx_start_configure(&imu_krooz.hmc);
  if (imu_krooz.meas_nb) {
    /* Average the accumulated gyro samples and remap axes
     * (p <- -q, q <- p, r <- r) — board-specific sensor orientation. */
    RATES_ASSIGN(imu.gyro_unscaled, -imu_krooz.rates_sum.q / imu_krooz.meas_nb, imu_krooz.rates_sum.p / imu_krooz.meas_nb, imu_krooz.rates_sum.r / imu_krooz.meas_nb);
#if IMU_KROOZ_USE_GYRO_MEDIAN_FILTER
    UpdateMedianFilterRatesInt(median_gyro, imu.gyro_unscaled);
#endif
    /* Average the accumulated accel samples with the matching axis remap
     * (x <- -y, y <- x, z <- z). */
    VECT3_ASSIGN(imu.accel_unscaled, -imu_krooz.accel_sum.y / imu_krooz.meas_nb, imu_krooz.accel_sum.x / imu_krooz.meas_nb, imu_krooz.accel_sum.z / imu_krooz.meas_nb);
#if IMU_KROOZ_USE_ACCEL_MEDIAN_FILTER
    UpdateMedianFilterVect3Int(median_accel, imu.accel_unscaled);
#endif
    /* Running average of the gyro:
     * filtered = (AVG_FILTER * filtered + new) / (AVG_FILTER + 1). */
    RATES_SMUL(imu_krooz.gyro_filtered, imu_krooz.gyro_filtered, IMU_KROOZ_GYRO_AVG_FILTER);
    RATES_ADD(imu_krooz.gyro_filtered, imu.gyro_unscaled);
    RATES_SDIV(imu_krooz.gyro_filtered, imu_krooz.gyro_filtered, (IMU_KROOZ_GYRO_AVG_FILTER + 1));
    RATES_COPY(imu.gyro_unscaled, imu_krooz.gyro_filtered);
    /* Same running average for the accelerometer. */
    VECT3_SMUL(imu_krooz.accel_filtered, imu_krooz.accel_filtered, IMU_KROOZ_ACCEL_AVG_FILTER);
    VECT3_ADD(imu_krooz.accel_filtered, imu.accel_unscaled);
    VECT3_SDIV(imu_krooz.accel_filtered, imu_krooz.accel_filtered, (IMU_KROOZ_ACCEL_AVG_FILTER + 1));
    VECT3_COPY(imu.accel_unscaled, imu_krooz.accel_filtered);
    /* Reset the accumulators and flag fresh data for consumers. */
    RATES_ASSIGN(imu_krooz.rates_sum, 0, 0, 0);
    VECT3_ASSIGN(imu_krooz.accel_sum, 0, 0, 0);
    imu_krooz.meas_nb = 0;
    imu_krooz.gyr_valid = TRUE;
    imu_krooz.acc_valid = TRUE;
  }
  //RunOnceEvery(10,imu_krooz_downlink_raw());
}
void stabilization_attitude_ref_update() { /* integrate reference attitude */ struct FloatQuat qdot; FLOAT_QUAT_DERIVATIVE(qdot, stab_att_ref_rate, stab_att_ref_quat); QUAT_SMUL(qdot, qdot, DT_UPDATE); QUAT_ADD(stab_att_ref_quat, qdot); FLOAT_QUAT_NORMALIZE(stab_att_ref_quat); /* integrate reference rotational speeds */ struct FloatRates delta_rate; RATES_SMUL(delta_rate, stab_att_ref_accel, DT_UPDATE); RATES_ADD(stab_att_ref_rate, delta_rate); /* compute reference angular accelerations */ struct FloatQuat err; /* compute reference attitude error */ FLOAT_QUAT_INV_COMP(err, stab_att_sp_quat, stab_att_ref_quat); /* wrap it in the shortest direction */ FLOAT_QUAT_WRAP_SHORTEST(err); /* propagate the 2nd order linear model */ stab_att_ref_accel.p = -2.*stab_att_ref_model[ref_idx].zeta.p*stab_att_ref_model[ref_idx].omega.p*stab_att_ref_rate.p - stab_att_ref_model[ref_idx].omega.p*stab_att_ref_model[ref_idx].omega.p*err.qx; stab_att_ref_accel.q = -2.*stab_att_ref_model[ref_idx].zeta.q*stab_att_ref_model[ref_idx].omega.q*stab_att_ref_rate.q - stab_att_ref_model[ref_idx].omega.q*stab_att_ref_model[ref_idx].omega.q*err.qy; stab_att_ref_accel.r = -2.*stab_att_ref_model[ref_idx].zeta.r*stab_att_ref_model[ref_idx].omega.r*stab_att_ref_rate.r - stab_att_ref_model[ref_idx].omega.r*stab_att_ref_model[ref_idx].omega.r*err.qz; /* saturate acceleration */ const struct FloatRates MIN_ACCEL = { -REF_ACCEL_MAX_P, -REF_ACCEL_MAX_Q, -REF_ACCEL_MAX_R }; const struct FloatRates MAX_ACCEL = { REF_ACCEL_MAX_P, REF_ACCEL_MAX_Q, REF_ACCEL_MAX_R }; RATES_BOUND_BOX(stab_att_ref_accel, MIN_ACCEL, MAX_ACCEL); /* compute ref_euler */ FLOAT_EULERS_OF_QUAT(stab_att_ref_euler, stab_att_ref_quat); }
void UM6_packet_read_message(void) { if ((UM6_status == UM6Running) && PacketLength > 11) { switch (PacketAddr) { case IMU_UM6_GYRO_PROC: UM6_rate.p = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 0] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 1]))) * 0.0610352; UM6_rate.q = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 2] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 3]))) * 0.0610352; UM6_rate.r = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 4] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 5]))) * 0.0610352; RATES_SMUL(UM6_rate, UM6_rate, 0.0174532925); //Convert deg/sec to rad/sec RATES_BFP_OF_REAL(imu.gyro, UM6_rate); break; case IMU_UM6_ACCEL_PROC: UM6_accel.x = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 0] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 1]))) * 0.000183105; UM6_accel.y = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 2] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 3]))) * 0.000183105; UM6_accel.z = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 4] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 5]))) * 0.000183105; VECT3_SMUL(UM6_accel, UM6_accel, 9.80665); //Convert g into m/s2 ACCELS_BFP_OF_REAL(imu.accel, UM6_accel); // float to int break; case IMU_UM6_MAG_PROC: UM6_mag.x = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 0] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 1]))) * 0.000305176; UM6_mag.y = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 2] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 3]))) * 0.000305176; UM6_mag.z = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 4] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 5]))) * 0.000305176; // Assume the same units for magnetic field // Magnitude at the Earth's surface ranges from 25 to 65 microteslas (0.25 to 0.65 gauss). 
MAGS_BFP_OF_REAL(imu.mag, UM6_mag); break; case IMU_UM6_EULER: UM6_eulers.phi = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 0] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 1]))) * 0.0109863; UM6_eulers.theta = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 2] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 3]))) * 0.0109863; UM6_eulers.psi = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 4] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 5]))) * 0.0109863; EULERS_SMUL(UM6_eulers, UM6_eulers, 0.0174532925); //Convert deg to rad EULERS_BFP_OF_REAL(ahrs_impl.ltp_to_imu_euler, UM6_eulers); break; case IMU_UM6_QUAT: UM6_quat.qi = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 0] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 1]))) * 0.0000335693; UM6_quat.qx = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 2] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 3]))) * 0.0000335693; UM6_quat.qy = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 4] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 5]))) * 0.0000335693; UM6_quat.qz = ((float)((int16_t) ((UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 6] << 8) | UM6_packet.msg_buf[IMU_UM6_DATA_OFFSET + 7]))) * 0.0000335693; QUAT_BFP_OF_REAL(ahrs_impl.ltp_to_imu_quat, UM6_quat); break; default: break; } } }