void orientationCalcEulers_f(struct OrientationReps* orientation) {
  if (bit_is_set(orientation->status, ORREP_EULER_F))
    return;

  if (bit_is_set(orientation->status, ORREP_EULER_I)) {
    EULERS_FLOAT_OF_BFP(orientation->eulers_f, orientation->eulers_i);
  }
  else if (bit_is_set(orientation->status, ORREP_RMAT_F)) {
    FLOAT_EULERS_OF_RMAT(orientation->eulers_f, orientation->rmat_f);
  }
  else if (bit_is_set(orientation->status, ORREP_QUAT_F)) {
    FLOAT_EULERS_OF_QUAT(orientation->eulers_f, orientation->quat_f);
  }
  else if (bit_is_set(orientation->status, ORREP_RMAT_I)) {
    RMAT_FLOAT_OF_BFP(orientation->rmat_f, orientation->rmat_i);
    SetBit(orientation->status, ORREP_RMAT_F);
    FLOAT_EULERS_OF_RMAT(orientation->eulers_f, orientation->rmat_f);
  }
  else if (bit_is_set(orientation->status, ORREP_QUAT_I)) {
    QUAT_FLOAT_OF_BFP(orientation->quat_f, orientation->quat_i);
    SetBit(orientation->status, ORREP_QUAT_F);
    FLOAT_EULERS_OF_QUAT(orientation->eulers_f, orientation->quat_f);
  }
  /* set bit to indicate this representation is computed */
  SetBit(orientation->status, ORREP_EULER_F);
}
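
The conversion above is lazy: it returns immediately if the float Euler representation is already flagged as valid, and otherwise derives it from the cheapest representation available, setting the status bit at the end. For reference, a minimal self-contained sketch of the kind of rotation-matrix-to-Euler extraction behind FLOAT_EULERS_OF_RMAT (assuming a row-major LTP-to-body matrix and the ZYX angle convention; the helper name and array layout are illustrative, not taken from the original file):

#include <math.h>

/* Illustrative helper: ZYX Euler angles (phi, theta, psi) from a row-major
 * 3x3 LTP-to-body rotation matrix r[9]. */
static void eulers_of_rmat_sketch(const float r[9],
                                  float *phi, float *theta, float *psi) {
  *phi   = atan2f(r[5], r[8]);   /* roll:  atan2(R12, R22) */
  *theta = -asinf(r[2]);         /* pitch: -asin(R02)      */
  *psi   = atan2f(r[1], r[0]);   /* yaw:   atan2(R01, R00) */
}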
Example #2
void ahrs_update_mag_2d_dumb(void) {

  /* project mag onto the local tangent plane */
  struct FloatEulers ltp_to_imu_euler;
  FLOAT_EULERS_OF_RMAT(ltp_to_imu_euler, ahrs_impl.ltp_to_imu_rmat);
  struct FloatVect3 magf;
  MAGS_FLOAT_OF_BFP(magf, imu.mag);
  const float cphi   = cosf(ltp_to_imu_euler.phi);
  const float sphi   = sinf(ltp_to_imu_euler.phi);
  const float ctheta = cosf(ltp_to_imu_euler.theta);
  const float stheta = sinf(ltp_to_imu_euler.theta);
  const float mn = ctheta * magf.x + sphi*stheta*magf.y + cphi*stheta*magf.z;
  const float me =     0. * magf.x + cphi       *magf.y - sphi       *magf.z;

  const float res_norm = -RMAT_ELMT(ahrs_impl.ltp_to_imu_rmat, 0,0) * me +
                          RMAT_ELMT(ahrs_impl.ltp_to_imu_rmat, 1,0) * mn;
  const struct FloatVect3 r2 = {RMAT_ELMT(ahrs_impl.ltp_to_imu_rmat, 2,0),
                                RMAT_ELMT(ahrs_impl.ltp_to_imu_rmat, 2,1),
                                RMAT_ELMT(ahrs_impl.ltp_to_imu_rmat, 2,2)};
  const float mag_rate_update_gain = 2.5;
  FLOAT_RATES_ADD_SCALED_VECT(ahrs_impl.rate_correction, r2, (mag_rate_update_gain*res_norm));
  const float mag_bias_update_gain = -2.5e-4;
  FLOAT_RATES_ADD_SCALED_VECT(ahrs_impl.gyro_bias, r2, (mag_bias_update_gain*res_norm));

}
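
In the projection above, mn and me are the magnetometer reading rotated into the local tangent plane using only roll and pitch, i.e. a tilt-compensated compass; the residual res_norm then compares that measurement against the current attitude estimate. A hedged, standalone sketch of the same projection and of how a raw heading could be read from it (not part of the original filter; sign handling and magnetic declination depend on the conventions in use):

#include <math.h>

/* Illustrative only: tilt-compensated heading from a body-frame magnetometer
 * reading (mx, my, mz) and the current roll/pitch (phi, theta), NED frame,
 * zero declination assumed. */
static float mag_heading_sketch(float mx, float my, float mz,
                                float phi, float theta) {
  const float mn = cosf(theta) * mx
                 + sinf(phi) * sinf(theta) * my
                 + cosf(phi) * sinf(theta) * mz;    /* north component */
  const float me = cosf(phi) * my - sinf(phi) * mz; /* east component  */
  return atan2f(-me, mn);                           /* heading estimate */
}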
Example #3
/*
 * Compute body orientation and rates from imu orientation and rates
 */
static inline void compute_body_orientation_and_rates(void) {

  FLOAT_QUAT_COMP_INV(ahrs_float.ltp_to_body_quat,
                      ahrs_float.ltp_to_imu_quat, ahrs_impl.body_to_imu_quat);
  FLOAT_RMAT_COMP_INV(ahrs_float.ltp_to_body_rmat,
                      ahrs_float.ltp_to_imu_rmat, ahrs_impl.body_to_imu_rmat);
  FLOAT_EULERS_OF_RMAT(ahrs_float.ltp_to_body_euler, ahrs_float.ltp_to_body_rmat);
  FLOAT_RMAT_TRANSP_RATEMULT(ahrs_float.body_rate, ahrs_impl.body_to_imu_rmat, ahrs_float.imu_rate);

}
Example #4
/*
 * Compute body orientation and rates from imu orientation and rates
 */
static inline void set_body_orientation_and_rates(void) {

  struct FloatRates body_rate;
  FLOAT_RMAT_TRANSP_RATEMULT(body_rate, ahrs_impl.body_to_imu_rmat, ahrs_impl.imu_rate);
  stateSetBodyRates_f(&body_rate);

  struct FloatRMat ltp_to_imu_rmat, ltp_to_body_rmat;
  FLOAT_RMAT_OF_EULERS(ltp_to_imu_rmat, ahrs_impl.ltp_to_imu_euler);
  FLOAT_RMAT_COMP_INV(ltp_to_body_rmat, ltp_to_imu_rmat, ahrs_impl.body_to_imu_rmat);

  // apply the configured roll/pitch neutral offsets
  struct FloatEulers ltp_to_body_euler;
  FLOAT_EULERS_OF_RMAT(ltp_to_body_euler, ltp_to_body_rmat);
  ltp_to_body_euler.phi -= ins_roll_neutral;
  ltp_to_body_euler.theta -= ins_pitch_neutral;
  stateSetNedToBodyEulers_f(&ltp_to_body_euler);

  // should be replaced at the end by:
  //   stateSetNedToBodyRMat_f(&ltp_to_body_rmat);

}
Example #5
float test_quat_of_rmat(void) {

    //  struct FloatEulers eul = {-0.280849, 0.613423, -1.850440};
    struct FloatEulers eul = {RadOfDeg(0.131579),  RadOfDeg(-62.397659), RadOfDeg(-110.470299)};
    //  struct FloatEulers eul = {RadOfDeg(0.13), RadOfDeg(180.), RadOfDeg(-61.)};

    struct FloatMat33 rm;
    FLOAT_RMAT_OF_EULERS(rm, eul);
    DISPLAY_FLOAT_RMAT_AS_EULERS_DEG("rmat", rm);

    struct FloatQuat q;
    FLOAT_QUAT_OF_RMAT(q, rm);
    DISPLAY_FLOAT_QUAT("q_of_rm   ", q);
    DISPLAY_FLOAT_QUAT_AS_EULERS_DEG("q_of_rm   ", q);

    struct FloatQuat qref;
    FLOAT_QUAT_OF_EULERS(qref, eul);
    DISPLAY_FLOAT_QUAT("q_of_euler", qref);
    DISPLAY_FLOAT_QUAT_AS_EULERS_DEG("q_of_euler", qref);

    printf("\n\n\n");

    struct FloatMat33 r_att;
    struct FloatEulers e312 = { eul.phi, eul.theta, eul.psi };
    FLOAT_RMAT_OF_EULERS_312(r_att, e312);
    DISPLAY_FLOAT_RMAT("r_att  ", r_att);
    DISPLAY_FLOAT_RMAT_AS_EULERS_DEG("r_att  ", r_att);

    struct FloatQuat q_att;
    FLOAT_QUAT_OF_RMAT(q_att, r_att);

    struct FloatEulers e_att;
    FLOAT_EULERS_OF_RMAT(e_att, r_att);
    DISPLAY_FLOAT_EULERS_DEG("of rmat", e_att);

    FLOAT_EULERS_OF_QUAT(e_att, q_att);
    DISPLAY_FLOAT_EULERS_DEG("of quat", e_att);

    return 0.;
}
Example #6
float test_eulers_of_rmat(struct FloatRMat frm, int display) {

    struct FloatEulers fe;
    FLOAT_EULERS_OF_RMAT(fe, frm);
    struct Int32RMat irm;
    RMAT_BFP_OF_REAL(irm, frm);
    struct Int32Eulers ie;
    INT32_EULERS_OF_RMAT(ie, irm);
    struct FloatEulers fe2;
    EULERS_FLOAT_OF_BFP(fe2, ie);
    EULERS_SUB(fe2, fe); /* error between float result and BFP round-trip */
    float norm_err = FLOAT_EULERS_NORM(fe2);

    if (display) {
        printf("euler of rmat\n");
        //    DISPLAY_FLOAT_RMAT("fr", fr);
        DISPLAY_FLOAT_EULERS_DEG("fe", fe);
        DISPLAY_INT32_EULERS("ie", ie);
        DISPLAY_INT32_EULERS_AS_FLOAT_DEG("ieaf", ie);
    }

    return norm_err;

}
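
A hypothetical driver for the test above (not from the original file; it only reuses macros and types that already appear in this listing) would build a rotation matrix from known Euler angles and print the float-vs-BFP round-trip error:

/* Hypothetical driver: feed test_eulers_of_rmat() a known attitude and
 * report the returned round-trip error norm. */
static void check_eulers_of_rmat(void) {
  struct FloatEulers eul = { RadOfDeg(10.), RadOfDeg(-20.), RadOfDeg(30.) };
  struct FloatRMat rm;
  FLOAT_RMAT_OF_EULERS(rm, eul);
  float norm_err = test_eulers_of_rmat(rm, 1 /* display */);
  printf("eulers_of_rmat round-trip error: %f rad\n", norm_err);
}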
Example #7
static void ahrs_do_update_mag(void) {
	int i, j, k;

#ifdef BAFL_DEBUG2
	printf("Mag update.\n");
#endif

	MAGS_FLOAT_OF_BFP(bafl_mag, imu.mag);

	/* P_prio = P */
	for (i = 0; i < BAFL_SSIZE; i++) {
		for (j = 0; j < BAFL_SSIZE; j++) {
			bafl_Pprio[i][j] = bafl_P[i][j];
		}
	}

	/*
	 * set up measurement matrix
	 *
	 * h = [236.; -2.; 396.];
	 * hx = [ h(2); -h(1); 0]; //only psi (phi and theta = 0)
	 * Hm = Cnb * hx;
	 * H = [ 0  0        0  0  0
	 *       0  0 Cnb*hx 0  0  0
	 *       0  0        0  0  0 ];
	 * */
	/*bafl_H[0][2] = RMAT_ELMT(bafl_dcm, 0, 0) * bafl_h.y - RMAT_ELMT(bafl_dcm, 0, 1) * bafl_h.x;
	bafl_H[1][2] = RMAT_ELMT(bafl_dcm, 1, 0) * bafl_h.y - RMAT_ELMT(bafl_dcm, 1, 1) * bafl_h.x;
	bafl_H[2][2] = RMAT_ELMT(bafl_dcm, 2, 0) * bafl_h.y - RMAT_ELMT(bafl_dcm, 2, 1) * bafl_h.x;*/
	bafl_H[0][2] = -RMAT_ELMT(bafl_dcm, 0, 1);
	bafl_H[1][2] = -RMAT_ELMT(bafl_dcm, 1, 1);
	bafl_H[2][2] = -RMAT_ELMT(bafl_dcm, 2, 1);
	//rest is zero

	/**********************************************
	 * compute Kalman gain K
	 *
	 * K = P_prio * H_T * inv(S)
	 * S = H * P_prio * HT + R
	 *
	 * K = P_prio * HT * inv(H * P_prio * HT + R)
	 *
	 **********************************************/

	/* covariance residual S = H * P_prio * HT + R    */

	/* temp_S(3x6) = H(3x6) * P_prio(6x6)
	 *
	 * only third column of H is non-zero
	 */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < BAFL_SSIZE; j++) {
			bafl_tempS[i][j] = bafl_H[i][2] * bafl_Pprio[2][j];
		}
	}

	/* S(3x3) = temp_S(3x6) * HT(6x3) + R(3x3)
	 *
	 * only third row of HT is non-zero
	 */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			bafl_S[i][j] = bafl_tempS[i][2] * bafl_H[j][2]; /* H[j][2] = HT[2][j] */
		}
		bafl_S[i][i] += bafl_R_mag;
	}

	/* invert S
	 */
	FLOAT_MAT33_INVERT(bafl_invS, bafl_S);

	/* temp_K(6x3) = P_prio(6x6) * HT(6x3)
	 *
	 * only third row of HT is non-zero
	 */
	for (i = 0; i < BAFL_SSIZE; i++) {
		for (j = 0; j < 3; j++) {
			/* not computing zero elements */
			bafl_tempK[i][j] = bafl_Pprio[i][2] * bafl_H[j][2]; /* H[j][2] = HT[2][j] */
		}
	}

	/* K(6x3) = temp_K(6x3) * invS(3x3)
	 */
	for (i = 0; i < BAFL_SSIZE; i++) {
		for (j = 0; j < 3; j++) {
			bafl_K[i][j]  = bafl_tempK[i][0] * bafl_invS[0][j];
			bafl_K[i][j] += bafl_tempK[i][1] * bafl_invS[1][j];
			bafl_K[i][j] += bafl_tempK[i][2] * bafl_invS[2][j];
		}
	}

	/**********************************************
	 * Update filter state.
	 *
	 *  The a priori filter state is zero since the errors are corrected after each update.
	 *
	 *  X = X_prio + K (y - H * X_prio)
	 *  X = K * y
	 **********************************************/

	/*  innovation
	 *  y = Cnb * [hx; hy; hz] - mag
	 */
	FLOAT_RMAT_VECT3_MUL(bafl_ym, bafl_dcm, bafl_h); //can be optimized
	FLOAT_VECT3_SUB(bafl_ym, bafl_mag);

	/* X(6) = K(6x3) * y(3)
	 */
	for (i = 0; i < BAFL_SSIZE; i++) {
		bafl_X[i]  = bafl_K[i][0] * bafl_ym.x;
		bafl_X[i] += bafl_K[i][1] * bafl_ym.y;
		bafl_X[i] += bafl_K[i][2] * bafl_ym.z;
	}

	/**********************************************
	 * Update the filter covariance.
	 *
	 *  P = P_prio - K * H * P_prio
	 *  P = ( I - K * H ) * P_prio
	 *
	 **********************************************/

	/*  temp(6x6) = I(6x6) - K(6x3)*H(3x6)
	 *
	 *  only the third column of H is non-zero
	 */
	for (i = 0; i < 6; i++) {
		for (j = 0; j < 6; j++) {
			if (i == j) {
				bafl_tempP[i][j] = 1.;
			} else {
				bafl_tempP[i][j] = 0.;
			}
			if (j == 2) { /* omit the parts where H is zero */
				for (k = 0; k < 3; k++) {
					bafl_tempP[i][j] -= bafl_K[i][k] * bafl_H[k][j];
				}
			}
		}
	}

	/*  P(6x6) = temp(6x6) * P_prio(6x6)
	 */
	for (i = 0; i < BAFL_SSIZE; i++) {
		for (j = 0; j < BAFL_SSIZE; j++) {
			bafl_P[i][j] = bafl_tempP[i][0] * bafl_Pprio[0][j];
			for (k = 1; k < BAFL_SSIZE; k++) {
				bafl_P[i][j] += bafl_tempP[i][k] * bafl_Pprio[k][j];
			}
		}
	}

#ifdef LKF_PRINT_P
	printf("Pum=");
	for (i = 0; i < 6; i++) {
		printf("[");
		for (j = 0; j < 6; j++) {
			printf("%f\t", bafl_P[i][j]);
		}
		printf("]\n    ");
	}
	printf("\n");
#endif

	/**********************************************
	 *  Correct errors.
	 *
	 *
	 **********************************************/

	/*  Error quaternion.
	 */
	QUAT_ASSIGN(bafl_q_m_err, 1.0, bafl_X[0]/2, bafl_X[1]/2, bafl_X[2]/2);
	FLOAT_QUAT_INVERT(bafl_q_m_err, bafl_q_m_err);
	/* normalize */
	float q_sq;
	q_sq = bafl_q_m_err.qx * bafl_q_m_err.qx + bafl_q_m_err.qy * bafl_q_m_err.qy + bafl_q_m_err.qz * bafl_q_m_err.qz;
	if (q_sq > 1) { /* this should actually never happen */
		FLOAT_QUAT_SMUL(bafl_q_m_err, bafl_q_m_err, 1 / sqrtf(1 + q_sq));
		printf("mag error quaternion q_sq > 1!!\n");
	} else {
		bafl_q_m_err.qi = sqrtf(1 - q_sq);
	}

	/*  correct attitude
	 */
	FLOAT_QUAT_COMP(bafl_qtemp, bafl_q_m_err, bafl_quat);
	FLOAT_QUAT_COPY(bafl_quat, bafl_qtemp);

	/*  correct gyro bias
	 */
	RATES_ASSIGN(bafl_b_m_err, bafl_X[3], bafl_X[4], bafl_X[5]);
	RATES_SUB(bafl_bias, bafl_b_m_err);

	/*
	 *  compute all representations
	 */
	/* maintain rotation matrix representation */
	FLOAT_RMAT_OF_QUAT(bafl_dcm, bafl_quat);
	/* maintain euler representation */
	FLOAT_EULERS_OF_RMAT(bafl_eulers, bafl_dcm);
	AHRS_TO_BFP();
	AHRS_LTP_TO_BODY();
}
Example #8
static void ahrs_do_update_accel(void) {
	int i, j, k;

#ifdef BAFL_DEBUG2
	printf("Accel update.\n");
#endif

	/* P_prio = P */
	for (i = 0; i < BAFL_SSIZE; i++) {
		for (j = 0; j < BAFL_SSIZE; j++) {
			bafl_Pprio[i][j] = bafl_P[i][j];
		}
	}

	/*
	 * set up measurement matrix
	 *
	 * g = 9.81
	 * gx = [ 0 -g  0
	 *        g  0  0
	 *        0  0  0 ]
	 * H = [          0 0 0 ]
	 *	   [ -Cnb*gx  0 0 0 ]
	 *     [          0 0 0 ]
	 *
	 * */
	bafl_H[0][0] = -RMAT_ELMT(bafl_dcm, 0, 1) * BAFL_g;
	bafl_H[0][1] =  RMAT_ELMT(bafl_dcm, 0, 0) * BAFL_g;
	bafl_H[1][0] = -RMAT_ELMT(bafl_dcm, 1, 1) * BAFL_g;
	bafl_H[1][1] =  RMAT_ELMT(bafl_dcm, 1, 0) * BAFL_g;
	bafl_H[2][0] = -RMAT_ELMT(bafl_dcm, 2, 1) * BAFL_g;
	bafl_H[2][1] =  RMAT_ELMT(bafl_dcm, 2, 0) * BAFL_g;
	/* rest is zero
	bafl_H[0][2] = 0.;
	bafl_H[1][2] = 0.;
	bafl_H[2][2] = 0.;*/

	/**********************************************
	 * compute Kalman gain K
	 *
	 * K = P_prio * H_T * inv(S)
	 * S = H * P_prio * HT + R
	 *
	 * K = P_prio * HT * inv(H * P_prio * HT + R)
	 *
	 **********************************************/

	/* covariance residual S = H * P_prio * HT + R    */

	/* temp_S(3x6) = H(3x6) * P_prio(6x6)
	 * last 4 columns of H are zero
	 */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < BAFL_SSIZE; j++) {
			bafl_tempS[i][j]  = bafl_H[i][0] * bafl_Pprio[0][j];
			bafl_tempS[i][j] += bafl_H[i][1] * bafl_Pprio[1][j];
		}
	}

	/* S(3x3) = temp_S(3x6) * HT(6x3) + R(3x3)
	 *
	 * bottom 4 rows of HT are zero
	 */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			bafl_S[i][j]  = bafl_tempS[i][0] * bafl_H[j][0]; /* H[j][0] = HT[0][j] */
			bafl_S[i][j] += bafl_tempS[i][1] * bafl_H[j][1]; /* H[j][1] = HT[1][j] */
		}
		bafl_S[i][i] += bafl_R_accel;
	}

	/* invert S
	 */
	FLOAT_MAT33_INVERT(bafl_invS, bafl_S);


	/* temp_K(6x3) = P_prio(6x6) * HT(6x3)
	 *
	 * bottom 4 rows of HT are zero
	 */
	for (i = 0; i < BAFL_SSIZE; i++) {
		for (j = 0; j < 3; j++) {
			/* not computing zero elements */
			bafl_tempK[i][j]  = bafl_Pprio[i][0] * bafl_H[j][0]; /* H[j][0] = HT[0][j] */
			bafl_tempK[i][j] += bafl_Pprio[i][1] * bafl_H[j][1]; /* H[j][1] = HT[1][j] */
		}
	}

	/* K(6x3) = temp_K(6x3) * invS(3x3)
	 */
	for (i = 0; i < BAFL_SSIZE; i++) {
		for (j = 0; j < 3; j++) {
			bafl_K[i][j]  = bafl_tempK[i][0] * bafl_invS[0][j];
			bafl_K[i][j] += bafl_tempK[i][1] * bafl_invS[1][j];
			bafl_K[i][j] += bafl_tempK[i][2] * bafl_invS[2][j];
		}
	}

	/**********************************************
	 * Update filter state.
	 *
	 *  The a priori filter state is zero since the errors are corrected after each update.
	 *
	 *  X = X_prio + K (y - H * X_prio)
	 *  X = K * y
	 **********************************************/

	/*printf("R = ");
	for (i = 0; i < 3; i++) {
		printf("[");
		for (j = 0; j < 3; j++) {
			printf("%f\t", RMAT_ELMT(bafl_dcm, i, j));
		}
		printf("]\n    ");
	}
	printf("\n");*/

	/* innovation
	 * y = Cnb * -[0; 0; g] - accel
	 */
	bafl_ya.x = -RMAT_ELMT(bafl_dcm, 0, 2) * BAFL_g - bafl_accel_measure.x;
	bafl_ya.y = -RMAT_ELMT(bafl_dcm, 1, 2) * BAFL_g - bafl_accel_measure.y;
	bafl_ya.z = -RMAT_ELMT(bafl_dcm, 2, 2) * BAFL_g - bafl_accel_measure.z;

	/* X(6) = K(6x3) * y(3)
	 */
	for (i = 0; i < BAFL_SSIZE; i++) {
		bafl_X[i]  = bafl_K[i][0] * bafl_ya.x;
		bafl_X[i] += bafl_K[i][1] * bafl_ya.y;
		bafl_X[i] += bafl_K[i][2] * bafl_ya.z;
	}

	/**********************************************
	 * Update the filter covariance.
	 *
	 *  P = P_prio - K * H * P_prio
	 *  P = ( I - K * H ) * P_prio
	 *
	 **********************************************/

	/*  temp(6x6) = I(6x6) - K(6x3)*H(3x6)
	 *
	 *  last 4 columns of H are zero
	 */
	for (i = 0; i < BAFL_SSIZE; i++) {
		for (j = 0; j < BAFL_SSIZE; j++) {
			if (i == j) {
				bafl_tempP[i][j] = 1.;
			} else {
				bafl_tempP[i][j] = 0.;
			}
			if (j < 2) { /* omit the parts where H is zero */
				for (k = 0; k < 3; k++) {
					bafl_tempP[i][j] -= bafl_K[i][k] * bafl_H[k][j];
				}
			}
		}
	}

	/*  P(6x6) = temp(6x6) * P_prio(6x6)
	 */
	for (i = 0; i < BAFL_SSIZE; i++) {
		for (j = 0; j < BAFL_SSIZE; j++) {
			bafl_P[i][j] = bafl_tempP[i][0] * bafl_Pprio[0][j];
			for (k = 1; k < BAFL_SSIZE; k++) {
				bafl_P[i][j] += bafl_tempP[i][k] * bafl_Pprio[k][j];
			}
		}
	}

#ifdef LKF_PRINT_P
	printf("Pua=");
	for (i = 0; i < 6; i++) {
		printf("[");
		for (j = 0; j < 6; j++) {
			printf("%f\t", bafl_P[i][j]);
		}
		printf("]\n    ");
	}
	printf("\n");
#endif

	/**********************************************
	 *  Correct errors.
	 *
	 *
	 **********************************************/

	/*  Error quaternion.
	 */
	QUAT_ASSIGN(bafl_q_a_err, 1.0, bafl_X[0]/2, bafl_X[1]/2, bafl_X[2]/2);
	FLOAT_QUAT_INVERT(bafl_q_a_err, bafl_q_a_err);

	/* normalize
	 * Is this needed? Or is the approximation good enough?*/
	float q_sq;
	q_sq = bafl_q_a_err.qx * bafl_q_a_err.qx + bafl_q_a_err.qy * bafl_q_a_err.qy + bafl_q_a_err.qz * bafl_q_a_err.qz;
	if (q_sq > 1) { /* this should actually never happen */
		FLOAT_QUAT_SMUL(bafl_q_a_err, bafl_q_a_err, 1 / sqrtf(1 + q_sq));
		printf("Accel error quaternion q_sq > 1!!\n");
	} else {
		bafl_q_a_err.qi = sqrtf(1 - q_sq);
	}

	/*  correct attitude
	 */
	FLOAT_QUAT_COMP(bafl_qtemp, bafl_q_a_err, bafl_quat);
	FLOAT_QUAT_COPY(bafl_quat, bafl_qtemp);


	/*  correct gyro bias
	 */
	RATES_ASSIGN(bafl_b_a_err, bafl_X[3], bafl_X[4], bafl_X[5]);
	RATES_SUB(bafl_bias, bafl_b_a_err);

	/*
	 *  compute all representations
	 */
	/* maintain rotation matrix representation */
	FLOAT_RMAT_OF_QUAT(bafl_dcm, bafl_quat);
	/* maintain euler representation */
	FLOAT_EULERS_OF_RMAT(bafl_eulers, bafl_dcm);
	AHRS_TO_BFP();
	AHRS_LTP_TO_BODY();
}
Example #9
/*
 * Propagate our dynamic system in time
 *
 * Run the strapdown computation and the predict step of the filter
 *
 *
 *      quat_dot = Wxq(pqr) * quat
 *      bias_dot = 0
 *
 * Wxq is the quaternion omega matrix:
 *
 *              [ 0, -p, -q, -r ]
 *      1/2 *   [ p,  0,  r, -q ]
 *              [ q, -r,  0,  p ]
 *              [ r,  q, -p,  0 ]
 *
 */
void ahrs_propagate(void) {
	int i, j, k;

    ahrs_lowpass_accel();

	/* compute unbiased rates */
	RATES_FLOAT_OF_BFP(bafl_rates, imu.gyro);
	RATES_SUB(bafl_rates, bafl_bias);


	/* run strapdown computation
	 *
	 */

	/* multiplicative quaternion update */
	/* compute correction quaternion */
	QUAT_ASSIGN(bafl_qr, 1., bafl_rates.p * BAFL_DT / 2, bafl_rates.q * BAFL_DT / 2, bafl_rates.r * BAFL_DT / 2);
	/* normalize it. maybe not necessary?? */
	float q_sq;
	q_sq = (bafl_qr.qx * bafl_qr.qx + bafl_qr.qy * bafl_qr.qy + bafl_qr.qz * bafl_qr.qz) / 4;
	if (q_sq > 1) { /* this should actually never happen */
		FLOAT_QUAT_SMUL(bafl_qr, bafl_qr, 1 / sqrtf(1 + q_sq));
	} else {
		bafl_qr.qi = sqrtf(1 - q_sq);
	}
	/* propagate correction quaternion */
	FLOAT_QUAT_COMP(bafl_qtemp, bafl_quat, bafl_qr);
	FLOAT_QUAT_COPY(bafl_quat, bafl_qtemp);

	bafl_qnorm = FLOAT_QUAT_NORM(bafl_quat);
	//TODO: check whether brute-force normalization is good enough, or use Lagrange normalization?
	FLOAT_QUAT_NORMALISE(bafl_quat);


	/*
	 *  compute all representations
	 */
	/* maintain rotation matrix representation */
	FLOAT_RMAT_OF_QUAT(bafl_dcm, bafl_quat);
	/* maintain euler representation */
	FLOAT_EULERS_OF_RMAT(bafl_eulers, bafl_dcm);
	AHRS_TO_BFP();
	AHRS_LTP_TO_BODY();


	/* error KF-Filter
	 * propagate only the error covariance matrix since error is corrected after each measurement
	 *
	 * F = [ 0  0  0          ]
	 *     [ 0  0  0   -Cbn   ]
	 *     [ 0  0  0          ]
	 *     [ 0  0  0  0  0  0 ]
	 *     [ 0  0  0  0  0  0 ]
	 *     [ 0  0  0  0  0  0 ]
	 *
	 * T = e^(dt * F)
	 *
	 * T = [ 1   0   0             ]
	 *     [ 0   1   0   -Cbn*dt   ]
	 *     [ 0   0   1             ]
	 *     [ 0   0   0   1   0   0 ]
	 *     [ 0   0   0   0   1   0 ]
	 *     [ 0   0   0   0   0   1 ]
	 *
	 * P_prio = T * P * T_T + Q
	 *
	 */

	/*
	 *  compute state transition matrix T
	 *
	 *  diagonal elements of T are always one
	 */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			bafl_T[i][j + 3] = -RMAT_ELMT(bafl_dcm, j, i); /* inverted bafl_dcm */
		}
	}


	/*
	 * estimate the a priori error covariance matrix P_k = T * P_k-1 * T_T + Q
	 */
	/* Temporary multiplication temp(6x6) = T(6x6) * P(6x6)      */
	for (i = 0; i < BAFL_SSIZE; i++) {
		for (j = 0; j < BAFL_SSIZE; j++) {
			bafl_tempP[i][j] = 0.;
			for (k = 0; k < BAFL_SSIZE; k++) {
				bafl_tempP[i][j] += bafl_T[i][k] * bafl_P[k][j];
			}
		}
	}

	/* P_k(6x6) = temp(6x6) * T_T(6x6) + Q  */
	for (i = 0; i < BAFL_SSIZE; i++) {
		for (j = 0; j < BAFL_SSIZE; j++) {
			bafl_P[i][j] = 0.;
			for (k = 0; k < BAFL_SSIZE; k++) {
				bafl_P[i][j] += bafl_tempP[i][k] * bafl_T[j][k]; //T[j][k] = T_T[k][j]
			}
		}
		if (i<3) {
			bafl_P[i][i] += bafl_Q_att;
		} else {
			bafl_P[i][i] += bafl_Q_gyro;
		}
	}

#ifdef LKF_PRINT_P
	printf("Pp =");
	for (i = 0; i < 6; i++) {
		printf("[");
		for (j = 0; j < 6; j++) {
			printf("%f\t", bafl_P[i][j]);
		}
		printf("]\n    ");
	}
	printf("\n");
#endif
}
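
The header comment of ahrs_propagate() describes the continuous-time model quat_dot = Wxq(p, q, r) * quat, while the body integrates it through a first-order multiplicative quaternion update. For comparison, a standalone sketch of the Wxq form with a plain Euler step (illustrative only; plain float arrays rather than the filter's types):

#include <math.h>

/* Illustrative sketch: quat_dot = 1/2 * Omega(w) * quat, integrated with a
 * plain Euler step and brute-force renormalization.
 * q[4] = {qi, qx, qy, qz}, w[3] = {p, q, r} (body rates, rad/s). */
static void quat_integrate_wxq_sketch(float q[4], const float w[3], float dt) {
  const float p = w[0], qq = w[1], r = w[2];
  const float qd[4] = {
    0.5f * (-p  * q[1] - qq * q[2] - r  * q[3]),
    0.5f * ( p  * q[0] + r  * q[2] - qq * q[3]),
    0.5f * ( qq * q[0] - r  * q[1] + p  * q[3]),
    0.5f * ( r  * q[0] + qq * q[1] - p  * q[2])
  };
  float n = 0.f;
  for (int i = 0; i < 4; i++) { q[i] += dt * qd[i]; n += q[i] * q[i]; }
  n = sqrtf(n);
  for (int i = 0; i < 4; i++) { q[i] /= n; }
}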
Example #10
static inline void compute_imu_rmat_and_euler_from_quat(void) {
  FLOAT_RMAT_OF_QUAT(ahrs_float.ltp_to_imu_rmat, ahrs_float.ltp_to_imu_quat);
  FLOAT_EULERS_OF_RMAT(ahrs_float.ltp_to_imu_euler, ahrs_float.ltp_to_imu_rmat);
}