void FilterBase::validateDelta(double &delta) { // This handles issues with ROS time when use_sim_time is on and we're playing from bags. if (delta > 100000.0) { FB_DEBUG("Delta was very large. Suspect playing from bag file. Setting to 0.01\n"); delta = 0.01; } }
// Gates an innovation on its Mahalanobis distance.
//
// @param innovation - the innovation vector (measurement minus prediction)
// @param invCovariance - the inverse of the innovation covariance
// @param nsigmas - the gate size, in standard deviations
// @return true if the innovation passes the gate, false otherwise
bool FilterBase::checkMahalanobisThreshold(const Eigen::VectorXd &innovation,
                                           const Eigen::MatrixXd &invCovariance,
                                           const double nsigmas)
{
  // Squared Mahalanobis distance d^2 = y' * C^-1 * y; comparing squared
  // quantities avoids a square root.
  const double squaredMahalanobis = innovation.dot(invCovariance * innovation);
  const double squaredThreshold = nsigmas * nsigmas;

  if (squaredMahalanobis < squaredThreshold)
  {
    return true;
  }

  FB_DEBUG("Innovation mahalanobis distance test failed. Squared Mahalanobis is: " << squaredMahalanobis << "\n" <<
           "Threshold is: " << squaredThreshold << "\n" <<
           "Innovation is: " << innovation << "\n" <<
           "Innovation covariance is:\n" << invCovariance << "\n");

  return false;
}
void Ukf::correct(const Measurement &measurement) { FB_DEBUG("---------------------- Ukf::correct ----------------------\n" << "State is:\n" << state_ << "\nMeasurement is:\n" << measurement.measurement_ << "\nMeasurement covariance is:\n" << measurement.covariance_ << "\n"); // In our implementation, it may be that after we call predict once, we call correct // several times in succession (multiple measurements with different time stamps). In // that event, the sigma points need to be updated to reflect the current state. if(!uncorrected_) { // Take the square root of a small fraction of the estimateErrorCovariance_ using LL' decomposition weightedCovarSqrt_ = ((STATE_SIZE + lambda_) * estimateErrorCovariance_).llt().matrixL(); // Compute sigma points // First sigma point is the current state sigmaPoints_[0] = state_; // Next STATE_SIZE sigma points are state + weightedCovarSqrt_[ith column] // STATE_SIZE sigma points after that are state - weightedCovarSqrt_[ith column] for(size_t sigmaInd = 0; sigmaInd < STATE_SIZE; ++sigmaInd) { sigmaPoints_[sigmaInd + 1] = state_ + weightedCovarSqrt_.col(sigmaInd); sigmaPoints_[sigmaInd + 1 + STATE_SIZE] = state_ - weightedCovarSqrt_.col(sigmaInd); } } // We don't want to update everything, so we need to build matrices that only update // the measured parts of our state vector // First, determine how many state vector values we're updating std::vector<size_t> updateIndices; for (size_t i = 0; i < measurement.updateVector_.size(); ++i) { if (measurement.updateVector_[i]) { // Handle nan and inf values in measurements if (std::isnan(measurement.measurement_(i))) { FB_DEBUG("Value at index " << i << " was nan. Excluding from update.\n"); } else if (std::isinf(measurement.measurement_(i))) { FB_DEBUG("Value at index " << i << " was inf. 
Excluding from update.\n"); } else { updateIndices.push_back(i); } } } FB_DEBUG("Update indices are:\n" << updateIndices << "\n"); size_t updateSize = updateIndices.size(); // Now set up the relevant matrices Eigen::VectorXd stateSubset(updateSize); // x (in most literature) Eigen::VectorXd measurementSubset(updateSize); // z Eigen::MatrixXd measurementCovarianceSubset(updateSize, updateSize); // R Eigen::MatrixXd stateToMeasurementSubset(updateSize, STATE_SIZE); // H Eigen::MatrixXd kalmanGainSubset(STATE_SIZE, updateSize); // K Eigen::VectorXd innovationSubset(updateSize); // z - Hx Eigen::VectorXd predictedMeasurement(updateSize); Eigen::VectorXd sigmaDiff(updateSize); Eigen::MatrixXd predictedMeasCovar(updateSize, updateSize); Eigen::MatrixXd crossCovar(STATE_SIZE, updateSize); std::vector<Eigen::VectorXd> sigmaPointMeasurements(sigmaPoints_.size(), Eigen::VectorXd(updateSize)); stateSubset.setZero(); measurementSubset.setZero(); measurementCovarianceSubset.setZero(); stateToMeasurementSubset.setZero(); kalmanGainSubset.setZero(); innovationSubset.setZero(); predictedMeasurement.setZero(); predictedMeasCovar.setZero(); crossCovar.setZero(); // Now build the sub-matrices from the full-sized matrices for (size_t i = 0; i < updateSize; ++i) { measurementSubset(i) = measurement.measurement_(updateIndices[i]); stateSubset(i) = state_(updateIndices[i]); for (size_t j = 0; j < updateSize; ++j) { measurementCovarianceSubset(i, j) = measurement.covariance_(updateIndices[i], updateIndices[j]); } // Handle negative (read: bad) covariances in the measurement. Rather // than exclude the measurement or make up a covariance, just take // the absolute value. if (measurementCovarianceSubset(i, i) < 0) { FB_DEBUG("WARNING: Negative covariance for index " << i << " of measurement (value is" << measurementCovarianceSubset(i, i) << "). 
Using absolute value...\n"); measurementCovarianceSubset(i, i) = ::fabs(measurementCovarianceSubset(i, i)); } // If the measurement variance for a given variable is very // near 0 (as in e-50 or so) and the variance for that // variable in the covariance matrix is also near zero, then // the Kalman gain computation will blow up. Really, no // measurement can be completely without error, so add a small // amount in that case. if (measurementCovarianceSubset(i, i) < 1e-9) { measurementCovarianceSubset(i, i) = 1e-9; FB_DEBUG("WARNING: measurement had very small error covariance for index " << updateIndices[i] << ". Adding some noise to maintain filter stability.\n"); } } // The state-to-measurement function, h, will now be a measurement_size x full_state_size // matrix, with ones in the (i, i) locations of the values to be updated for (size_t i = 0; i < updateSize; ++i) { stateToMeasurementSubset(i, updateIndices[i]) = 1; } FB_DEBUG("Current state subset is:\n" << stateSubset << "\nMeasurement subset is:\n" << measurementSubset << "\nMeasurement covariance subset is:\n" << measurementCovarianceSubset << "\nState-to-measurement subset is:\n" << stateToMeasurementSubset << "\n"); // (1) Generate sigma points, use them to generate a predicted measurement for(size_t sigmaInd = 0; sigmaInd < sigmaPoints_.size(); ++sigmaInd) { sigmaPointMeasurements[sigmaInd] = stateToMeasurementSubset * sigmaPoints_[sigmaInd]; predictedMeasurement += stateWeights_[sigmaInd] * sigmaPointMeasurements[sigmaInd]; } // (2) Use the sigma point measurements and predicted measurement to compute a predicted // measurement covariance matrix P_zz and a state/measurement cross-covariance matrix P_xz. 
for(size_t sigmaInd = 0; sigmaInd < sigmaPoints_.size(); ++sigmaInd) { sigmaDiff = sigmaPointMeasurements[sigmaInd] - predictedMeasurement; predictedMeasCovar += covarWeights_[sigmaInd] * (sigmaDiff * sigmaDiff.transpose()); crossCovar += covarWeights_[sigmaInd] * ((sigmaPoints_[sigmaInd] - state_) * sigmaDiff.transpose()); } // (3) Compute the Kalman gain, making sure to use the actual measurement covariance: K = P_xz * (P_zz + R)^-1 Eigen::MatrixXd invInnovCov = (predictedMeasCovar + measurementCovarianceSubset).inverse(); kalmanGainSubset = crossCovar * invInnovCov; // (4) Apply the gain to the difference between the actual and predicted measurements: x = x + K(z - z_hat) innovationSubset = (measurementSubset - predictedMeasurement); // (5) Check Mahalanobis distance of innovation if (checkMahalanobisThreshold(innovationSubset, invInnovCov, measurement.mahalanobisThresh_)) { // Wrap angles in the innovation for (size_t i = 0; i < updateSize; ++i) { if (updateIndices[i] == StateMemberRoll || updateIndices[i] == StateMemberPitch || updateIndices[i] == StateMemberYaw) { while (innovationSubset(i) < -PI) { innovationSubset(i) += TAU; } while (innovationSubset(i) > PI) { innovationSubset(i) -= TAU; } } } state_ = state_ + kalmanGainSubset * innovationSubset; // (6) Compute the new estimate error covariance P = P - (K * P_zz * K') estimateErrorCovariance_ = estimateErrorCovariance_.eval() - (kalmanGainSubset * predictedMeasCovar * kalmanGainSubset.transpose()); wrapStateAngles(); // Mark that we need to re-compute sigma points for successive corrections uncorrected_ = false; FB_DEBUG("Predicated measurement covariance is:\n" << predictedMeasCovar << "\nCross covariance is:\n" << crossCovar << "\nKalman gain subset is:\n" << kalmanGainSubset << "\nInnovation:\n" << innovationSubset << "\nCorrected full state is:\n" << state_ << "\nCorrected full estimate error covariance is:\n" << estimateErrorCovariance_ << "\n\n---------------------- /Ukf::correct 
----------------------\n"); } }
// Ukf::predict
//
// Carries out the UKF prediction step over delta seconds:
//   1. builds the 3D kinematic transfer function from the current roll/pitch/yaw,
//   2. draws 2 * STATE_SIZE + 1 sigma points from the current estimate error
//      covariance and pushes them through the transfer function,
//   3. recombines the weighted sigma points into a predicted state and
//      covariance, and
//   4. adds delta-scaled process noise and re-wraps the state angles.
//
// @param delta - elapsed time, in seconds, over which to project the state.
void Ukf::predict(const double delta)
{
  FB_DEBUG("---------------------- Ukf::predict ----------------------\n" <<
           "delta is " << delta <<
           "\nstate is " << state_ << "\n");

  double roll = state_(StateMemberRoll);
  double pitch = state_(StateMemberPitch);
  double yaw = state_(StateMemberYaw);

  // We'll need these trig calculations a lot.
  double cr = cos(roll);
  double cp = cos(pitch);
  double cy = cos(yaw);
  double sr = sin(roll);
  double sp = sin(pitch);
  double sy = sin(yaw);

  // Prepare the transfer function. The X/Y/Z rows rotate body-frame velocities
  // into the world frame and integrate them over delta: v * delta terms for
  // velocity and 0.5 * a * delta^2 terms for acceleration.
  transferFunction_(StateMemberX, StateMemberVx) = cy * cp * delta;
  transferFunction_(StateMemberX, StateMemberVy) = (cy * sp * sr - sy * cr) * delta;
  transferFunction_(StateMemberX, StateMemberVz) = (cy * sp * cr + sy * sr) * delta;
  transferFunction_(StateMemberX, StateMemberAx) = 0.5 * transferFunction_(StateMemberX, StateMemberVx) * delta;
  transferFunction_(StateMemberX, StateMemberAy) = 0.5 * transferFunction_(StateMemberX, StateMemberVy) * delta;
  transferFunction_(StateMemberX, StateMemberAz) = 0.5 * transferFunction_(StateMemberX, StateMemberVz) * delta;
  transferFunction_(StateMemberY, StateMemberVx) = sy * cp * delta;
  transferFunction_(StateMemberY, StateMemberVy) = (sy * sp * sr + cy * cr) * delta;
  transferFunction_(StateMemberY, StateMemberVz) = (sy * sp * cr - cy * sr) * delta;
  transferFunction_(StateMemberY, StateMemberAx) = 0.5 * transferFunction_(StateMemberY, StateMemberVx) * delta;
  transferFunction_(StateMemberY, StateMemberAy) = 0.5 * transferFunction_(StateMemberY, StateMemberVy) * delta;
  transferFunction_(StateMemberY, StateMemberAz) = 0.5 * transferFunction_(StateMemberY, StateMemberVz) * delta;
  transferFunction_(StateMemberZ, StateMemberVx) = -sp * delta;
  transferFunction_(StateMemberZ, StateMemberVy) = cp * sr * delta;
  transferFunction_(StateMemberZ, StateMemberVz) = cp * cr * delta;
  transferFunction_(StateMemberZ, StateMemberAx) = 0.5 * transferFunction_(StateMemberZ, StateMemberVx) * delta;
  transferFunction_(StateMemberZ, StateMemberAy) = 0.5 * transferFunction_(StateMemberZ, StateMemberVy) * delta;
  transferFunction_(StateMemberZ, StateMemberAz) = 0.5 * transferFunction_(StateMemberZ, StateMemberVz) * delta;

  // NOTE(review): the orientation rows below reuse the position-row rotation
  // terms for the angular velocities. Confirm this matches the intended
  // body-rate -> Euler-rate kinematics, which in general use a different matrix.
  transferFunction_(StateMemberRoll, StateMemberVroll) = transferFunction_(StateMemberX, StateMemberVx);
  transferFunction_(StateMemberRoll, StateMemberVpitch) = transferFunction_(StateMemberX, StateMemberVy);
  transferFunction_(StateMemberRoll, StateMemberVyaw) = transferFunction_(StateMemberX, StateMemberVz);
  transferFunction_(StateMemberPitch, StateMemberVroll) = transferFunction_(StateMemberY, StateMemberVx);
  transferFunction_(StateMemberPitch, StateMemberVpitch) = transferFunction_(StateMemberY, StateMemberVy);
  transferFunction_(StateMemberPitch, StateMemberVyaw) = transferFunction_(StateMemberY, StateMemberVz);
  transferFunction_(StateMemberYaw, StateMemberVroll) = transferFunction_(StateMemberZ, StateMemberVx);
  transferFunction_(StateMemberYaw, StateMemberVpitch) = transferFunction_(StateMemberZ, StateMemberVy);
  transferFunction_(StateMemberYaw, StateMemberVyaw) = transferFunction_(StateMemberZ, StateMemberVz);

  // Velocities integrate the accelerations directly
  transferFunction_(StateMemberVx, StateMemberAx) = delta;
  transferFunction_(StateMemberVy, StateMemberAy) = delta;
  transferFunction_(StateMemberVz, StateMemberAz) = delta;

  // (1) Take the square root of a small fraction of the estimateErrorCovariance_ using LL' decomposition
  weightedCovarSqrt_ = ((STATE_SIZE + lambda_) * estimateErrorCovariance_).llt().matrixL();

  // (2) Compute sigma points *and* pass them through the transfer function to save
  // the extra loop

  // First sigma point is the current state
  sigmaPoints_[0] = transferFunction_ * state_;

  // Next STATE_SIZE sigma points are state + weightedCovarSqrt_[ith column]
  // STATE_SIZE sigma points after that are state - weightedCovarSqrt_[ith column]
  for(size_t sigmaInd = 0; sigmaInd < STATE_SIZE; ++sigmaInd)
  {
    sigmaPoints_[sigmaInd + 1] = transferFunction_ * (state_ + weightedCovarSqrt_.col(sigmaInd));
    sigmaPoints_[sigmaInd + 1 + STATE_SIZE] = transferFunction_ * (state_ - weightedCovarSqrt_.col(sigmaInd));
  }

  // (3) Sum the weighted sigma points to generate a new state prediction
  state_.setZero();
  for(size_t sigmaInd = 0; sigmaInd < sigmaPoints_.size(); ++sigmaInd)
  {
    state_ += stateWeights_[sigmaInd] * sigmaPoints_[sigmaInd];
  }

  // (4) Now use the sigma points and the predicted state to compute a predicted covariance
  estimateErrorCovariance_.setZero();
  Eigen::VectorXd sigmaDiff(STATE_SIZE);
  for(size_t sigmaInd = 0; sigmaInd < sigmaPoints_.size(); ++sigmaInd)
  {
    sigmaDiff = (sigmaPoints_[sigmaInd] - state_);
    estimateErrorCovariance_ += covarWeights_[sigmaInd] * (sigmaDiff * sigmaDiff.transpose());
  }

  // (5) Not strictly in the theoretical UKF formulation, but necessary here
  // to ensure that we actually incorporate the processNoiseCovariance_
  estimateErrorCovariance_ += delta * processNoiseCovariance_;

  // Keep the angles bounded
  wrapStateAngles();

  // Mark that we can keep these sigma points (correct() may reuse them until
  // the first correction invalidates them)
  uncorrected_ = true;

  FB_DEBUG("Predicted state is:\n" << state_ <<
           "\nPredicted estimate error covariance is:\n" << estimateErrorCovariance_ <<
           "\n\n--------------------- /Ukf::predict ----------------------\n");
}
// Ekf::correct
//
// Fuses a single measurement into the state using the standard EKF update.
// Only the state variables flagged in measurement.updateVector_ (and carrying
// finite values) participate: sub-matrices of x, z, R, and H are built over
// just those indices. The covariance update uses the Joseph form for improved
// numerical stability, and the whole update is gated on the measurement's
// Mahalanobis distance threshold.
//
// @param measurement - the measurement (values, covariance, update vector,
//                      Mahalanobis threshold) to fuse into the state.
void Ekf::correct(const Measurement &measurement)
{
  FB_DEBUG("---------------------- Ekf::correct ----------------------\n" <<
           "State is:\n" << state_ << "\n"
           "Topic is:\n" << measurement.topicName_ << "\n"
           "Measurement is:\n" << measurement.measurement_ << "\n"
           "Measurement topic name is:\n" << measurement.topicName_ << "\n\n"
           "Measurement covariance is:\n" << measurement.covariance_ << "\n");

  // We don't want to update everything, so we need to build matrices that only update
  // the measured parts of our state vector. Throughout prediction and correction, we
  // attempt to maximize efficiency in Eigen.

  // First, determine how many state vector values we're updating
  std::vector<size_t> updateIndices;
  for (size_t i = 0; i < measurement.updateVector_.size(); ++i)
  {
    if (measurement.updateVector_[i])
    {
      // Handle nan and inf values in measurements: a non-finite value would
      // poison the state, so drop that index from the update instead
      if (std::isnan(measurement.measurement_(i)))
      {
        FB_DEBUG("Value at index " << i << " was nan. Excluding from update.\n");
      }
      else if (std::isinf(measurement.measurement_(i)))
      {
        FB_DEBUG("Value at index " << i << " was inf. Excluding from update.\n");
      }
      else
      {
        updateIndices.push_back(i);
      }
    }
  }

  FB_DEBUG("Update indices are:\n" << updateIndices << "\n");

  size_t updateSize = updateIndices.size();

  // Now set up the relevant matrices
  Eigen::VectorXd stateSubset(updateSize);                              // x (in most literature)
  Eigen::VectorXd measurementSubset(updateSize);                        // z
  Eigen::MatrixXd measurementCovarianceSubset(updateSize, updateSize);  // R
  Eigen::MatrixXd stateToMeasurementSubset(updateSize, state_.rows());  // H
  Eigen::MatrixXd kalmanGainSubset(state_.rows(), updateSize);          // K
  Eigen::VectorXd innovationSubset(updateSize);                         // z - Hx

  stateSubset.setZero();
  measurementSubset.setZero();
  measurementCovarianceSubset.setZero();
  stateToMeasurementSubset.setZero();
  kalmanGainSubset.setZero();
  innovationSubset.setZero();

  // Now build the sub-matrices from the full-sized matrices
  for (size_t i = 0; i < updateSize; ++i)
  {
    measurementSubset(i) = measurement.measurement_(updateIndices[i]);
    stateSubset(i) = state_(updateIndices[i]);

    for (size_t j = 0; j < updateSize; ++j)
    {
      measurementCovarianceSubset(i, j) = measurement.covariance_(updateIndices[i], updateIndices[j]);
    }

    // Handle negative (read: bad) covariances in the measurement. Rather
    // than exclude the measurement or make up a covariance, just take
    // the absolute value.
    if (measurementCovarianceSubset(i, i) < 0)
    {
      FB_DEBUG("WARNING: Negative covariance for index " << i <<
               " of measurement (value is" << measurementCovarianceSubset(i, i) <<
               "). Using absolute value...\n");

      measurementCovarianceSubset(i, i) = ::fabs(measurementCovarianceSubset(i, i));
    }

    // If the measurement variance for a given variable is very
    // near 0 (as in e-50 or so) and the variance for that
    // variable in the covariance matrix is also near zero, then
    // the Kalman gain computation will blow up. Really, no
    // measurement can be completely without error, so add a small
    // amount in that case.
    if (measurementCovarianceSubset(i, i) < 1e-9)
    {
      FB_DEBUG("WARNING: measurement had very small error covariance for index " << updateIndices[i] <<
               ". Adding some noise to maintain filter stability.\n");

      measurementCovarianceSubset(i, i) = 1e-9;
    }
  }

  // The state-to-measurement function, h, will now be a measurement_size x full_state_size
  // matrix, with ones in the (i, i) locations of the values to be updated
  for (size_t i = 0; i < updateSize; ++i)
  {
    stateToMeasurementSubset(i, updateIndices[i]) = 1;
  }

  FB_DEBUG("Current state subset is:\n" << stateSubset <<
           "\nMeasurement subset is:\n" << measurementSubset <<
           "\nMeasurement covariance subset is:\n" << measurementCovarianceSubset <<
           "\nState-to-measurement subset is:\n" << stateToMeasurementSubset << "\n");

  // (1) Compute the Kalman gain: K = (PH') / (HPH' + R)
  Eigen::MatrixXd pht = estimateErrorCovariance_ * stateToMeasurementSubset.transpose();
  Eigen::MatrixXd hphrInv = (stateToMeasurementSubset * pht + measurementCovarianceSubset).inverse();
  kalmanGainSubset.noalias() = pht * hphrInv;

  innovationSubset = (measurementSubset - stateSubset);

  // Wrap angles in the innovation so an angular residual near +/-PI doesn't
  // produce a huge wrapped-around correction
  for (size_t i = 0; i < updateSize; ++i)
  {
    if (updateIndices[i] == StateMemberRoll ||
        updateIndices[i] == StateMemberPitch ||
        updateIndices[i] == StateMemberYaw)
    {
      while (innovationSubset(i) < -PI)
      {
        innovationSubset(i) += TAU;
      }

      while (innovationSubset(i) > PI)
      {
        innovationSubset(i) -= TAU;
      }
    }
  }

  // (2) Check Mahalanobis distance between mapped measurement and state.
  if (checkMahalanobisThreshold(innovationSubset, hphrInv, measurement.mahalanobisThresh_))
  {
    // (3) Apply the gain to the difference between the state and measurement: x = x + K(z - Hx)
    state_.noalias() += kalmanGainSubset * innovationSubset;

    // (4) Update the estimate error covariance using the Joseph form: (I - KH)P(I - KH)' + KRK'
    Eigen::MatrixXd gainResidual = identity_;
    gainResidual.noalias() -= kalmanGainSubset * stateToMeasurementSubset;
    estimateErrorCovariance_ = gainResidual * estimateErrorCovariance_ * gainResidual.transpose();
    estimateErrorCovariance_.noalias() += kalmanGainSubset *
                                          measurementCovarianceSubset *
                                          kalmanGainSubset.transpose();

    // Handle wrapping of angles
    wrapStateAngles();

    FB_DEBUG("Kalman gain subset is:\n" << kalmanGainSubset <<
             "\nInnovation is:\n" << innovationSubset <<
             "\nCorrected full state is:\n" << state_ <<
             "\nCorrected full estimate error covariance is:\n" << estimateErrorCovariance_ <<
             "\n\n---------------------- /Ekf::correct ----------------------\n");
  }
}
// Ekf::predict
//
// EKF prediction step: projects the state forward delta seconds through the 3D
// kinematic transfer function, then propagates the estimate error covariance
// through the analytically-derived Jacobian of that transfer function and adds
// delta-scaled process noise.
//
// @param delta - elapsed time, in seconds, over which to project the state.
void Ekf::predict(const double delta)
{
  FB_DEBUG("---------------------- Ekf::predict ----------------------\n" <<
           "delta is " << delta << "\n" <<
           "state is " << state_ << "\n");

  double roll = state_(StateMemberRoll);
  double pitch = state_(StateMemberPitch);
  double yaw = state_(StateMemberYaw);
  double xVel = state_(StateMemberVx);
  double yVel = state_(StateMemberVy);
  double zVel = state_(StateMemberVz);
  double rollVel = state_(StateMemberVroll);
  double pitchVel = state_(StateMemberVpitch);
  double yawVel = state_(StateMemberVyaw);
  double xAcc = state_(StateMemberAx);
  double yAcc = state_(StateMemberAy);
  double zAcc = state_(StateMemberAz);

  // We'll need these trig calculations a lot. sincos computes each sine/cosine
  // pair in a single call.
  double sp = 0.0;
  double cp = 0.0;
  ::sincos(pitch, &sp, &cp);

  double sr = 0.0;
  double cr = 0.0;
  ::sincos(roll, &sr, &cr);

  double sy = 0.0;
  double cy = 0.0;
  ::sincos(yaw, &sy, &cy);

  // Prepare the transfer function. The X/Y/Z rows rotate body-frame velocities
  // into the world frame and integrate them over delta (v * delta and
  // 0.5 * a * delta^2 terms).
  transferFunction_(StateMemberX, StateMemberVx) = cy * cp * delta;
  transferFunction_(StateMemberX, StateMemberVy) = (cy * sp * sr - sy * cr) * delta;
  transferFunction_(StateMemberX, StateMemberVz) = (cy * sp * cr + sy * sr) * delta;
  transferFunction_(StateMemberX, StateMemberAx) = 0.5 * transferFunction_(StateMemberX, StateMemberVx) * delta;
  transferFunction_(StateMemberX, StateMemberAy) = 0.5 * transferFunction_(StateMemberX, StateMemberVy) * delta;
  transferFunction_(StateMemberX, StateMemberAz) = 0.5 * transferFunction_(StateMemberX, StateMemberVz) * delta;
  transferFunction_(StateMemberY, StateMemberVx) = sy * cp * delta;
  transferFunction_(StateMemberY, StateMemberVy) = (sy * sp * sr + cy * cr) * delta;
  transferFunction_(StateMemberY, StateMemberVz) = (sy * sp * cr - cy * sr) * delta;
  transferFunction_(StateMemberY, StateMemberAx) = 0.5 * transferFunction_(StateMemberY, StateMemberVx) * delta;
  transferFunction_(StateMemberY, StateMemberAy) = 0.5 * transferFunction_(StateMemberY, StateMemberVy) * delta;
  transferFunction_(StateMemberY, StateMemberAz) = 0.5 * transferFunction_(StateMemberY, StateMemberVz) * delta;
  transferFunction_(StateMemberZ, StateMemberVx) = -sp * delta;
  transferFunction_(StateMemberZ, StateMemberVy) = cp * sr * delta;
  transferFunction_(StateMemberZ, StateMemberVz) = cp * cr * delta;
  transferFunction_(StateMemberZ, StateMemberAx) = 0.5 * transferFunction_(StateMemberZ, StateMemberVx) * delta;
  transferFunction_(StateMemberZ, StateMemberAy) = 0.5 * transferFunction_(StateMemberZ, StateMemberVy) * delta;
  transferFunction_(StateMemberZ, StateMemberAz) = 0.5 * transferFunction_(StateMemberZ, StateMemberVz) * delta;

  // NOTE(review): the orientation rows below reuse the position-row rotation
  // terms for the angular velocities. Confirm this matches the intended
  // body-rate -> Euler-rate kinematics, which in general use a different matrix.
  transferFunction_(StateMemberRoll, StateMemberVroll) = transferFunction_(StateMemberX, StateMemberVx);
  transferFunction_(StateMemberRoll, StateMemberVpitch) = transferFunction_(StateMemberX, StateMemberVy);
  transferFunction_(StateMemberRoll, StateMemberVyaw) = transferFunction_(StateMemberX, StateMemberVz);
  transferFunction_(StateMemberPitch, StateMemberVroll) = transferFunction_(StateMemberY, StateMemberVx);
  transferFunction_(StateMemberPitch, StateMemberVpitch) = transferFunction_(StateMemberY, StateMemberVy);
  transferFunction_(StateMemberPitch, StateMemberVyaw) = transferFunction_(StateMemberY, StateMemberVz);
  transferFunction_(StateMemberYaw, StateMemberVroll) = transferFunction_(StateMemberZ, StateMemberVx);
  transferFunction_(StateMemberYaw, StateMemberVpitch) = transferFunction_(StateMemberZ, StateMemberVy);
  transferFunction_(StateMemberYaw, StateMemberVyaw) = transferFunction_(StateMemberZ, StateMemberVz);

  // Velocities integrate the accelerations directly
  transferFunction_(StateMemberVx, StateMemberAx) = delta;
  transferFunction_(StateMemberVy, StateMemberAy) = delta;
  transferFunction_(StateMemberVz, StateMemberAz) = delta;

  // Prepare the transfer function Jacobian. This function is analytically derived from the
  // transfer function.
  double xCoeff = 0.0;
  double yCoeff = 0.0;
  double zCoeff = 0.0;
  double oneHalfATSquared = 0.5 * delta * delta;

  // Partials of the X-position row (dFx) and roll row (dFR) with respect to roll
  yCoeff = cy * sp * cr + sy * sr;
  zCoeff = -cy * sp * sr + sy * cr;
  double dFx_dR = (yCoeff * yVel + zCoeff * zVel) * delta +
                  (yCoeff * yAcc + zCoeff * zAcc) * oneHalfATSquared;
  double dFR_dR = 1 + (yCoeff * pitchVel + zCoeff * yawVel) * delta;

  // ...with respect to pitch
  xCoeff = -cy * sp;
  yCoeff = cy * cp * sr;
  zCoeff = cy * cp * cr;
  double dFx_dP = (xCoeff * xVel + yCoeff * yVel + zCoeff * zVel) * delta +
                  (xCoeff * xAcc + yCoeff * yAcc + zCoeff * zAcc) * oneHalfATSquared;
  double dFR_dP = (xCoeff * rollVel + yCoeff * pitchVel + zCoeff * yawVel) * delta;

  // ...with respect to yaw
  xCoeff = -sy * cp;
  yCoeff = -sy * sp * sr - cy * cr;
  zCoeff = -sy * sp * cr + cy * sr;
  double dFx_dY = (xCoeff * xVel + yCoeff * yVel + zCoeff * zVel) * delta +
                  (xCoeff * xAcc + yCoeff * yAcc + zCoeff * zAcc) * oneHalfATSquared;
  double dFR_dY = (xCoeff * rollVel + yCoeff * pitchVel + zCoeff * yawVel) * delta;

  // Partials of the Y-position row (dFy) and pitch row (dFP) with respect to roll
  yCoeff = sy * sp * cr - cy * sr;
  zCoeff = -sy * sp * sr - cy * cr;
  double dFy_dR = (yCoeff * yVel + zCoeff * zVel) * delta +
                  (yCoeff * yAcc + zCoeff * zAcc) * oneHalfATSquared;
  double dFP_dR = (yCoeff * pitchVel + zCoeff * yawVel) * delta;

  // ...with respect to pitch
  xCoeff = -sy * sp;
  yCoeff = sy * cp * sr;
  zCoeff = sy * cp * cr;
  double dFy_dP = (xCoeff * xVel + yCoeff * yVel + zCoeff * zVel) * delta +
                  (xCoeff * xAcc + yCoeff * yAcc + zCoeff * zAcc) * oneHalfATSquared;
  double dFP_dP = 1 + (xCoeff * rollVel + yCoeff * pitchVel + zCoeff * yawVel) * delta;

  // ...with respect to yaw
  xCoeff = cy * cp;
  yCoeff = cy * sp * sr - sy * cr;
  zCoeff = cy * sp * cr + sy * sr;
  double dFy_dY = (xCoeff * xVel + yCoeff * yVel + zCoeff * zVel) * delta +
                  (xCoeff * xAcc + yCoeff * yAcc + zCoeff * zAcc) * oneHalfATSquared;
  double dFP_dY = (xCoeff * rollVel + yCoeff * pitchVel + zCoeff * yawVel) * delta;

  // Partials of the Z-position row (dFz) and yaw row (dFY) with respect to roll.
  // The Z row of the transfer function has no yaw terms, so no dFz_dY / dFY_dY
  // entries are computed below.
  yCoeff = cp * cr;
  zCoeff = -cp * sr;
  double dFz_dR = (yCoeff * yVel + zCoeff * zVel) * delta +
                  (yCoeff * yAcc + zCoeff * zAcc) * oneHalfATSquared;
  double dFY_dR = (yCoeff * pitchVel + zCoeff * yawVel) * delta;

  // ...with respect to pitch
  xCoeff = -cp;
  yCoeff = -sp * sr;
  zCoeff = -sp * cr;
  double dFz_dP = (xCoeff * xVel + yCoeff * yVel + zCoeff * zVel) * delta +
                  (xCoeff * xAcc + yCoeff * yAcc + zCoeff * zAcc) * oneHalfATSquared;
  double dFY_dP = (xCoeff * rollVel + yCoeff * pitchVel + zCoeff * yawVel) * delta;

  // Much of the transfer function Jacobian is identical to the transfer function
  transferFunctionJacobian_ = transferFunction_;
  transferFunctionJacobian_(StateMemberX, StateMemberRoll) = dFx_dR;
  transferFunctionJacobian_(StateMemberX, StateMemberPitch) = dFx_dP;
  transferFunctionJacobian_(StateMemberX, StateMemberYaw) = dFx_dY;
  transferFunctionJacobian_(StateMemberY, StateMemberRoll) = dFy_dR;
  transferFunctionJacobian_(StateMemberY, StateMemberPitch) = dFy_dP;
  transferFunctionJacobian_(StateMemberY, StateMemberYaw) = dFy_dY;
  transferFunctionJacobian_(StateMemberZ, StateMemberRoll) = dFz_dR;
  transferFunctionJacobian_(StateMemberZ, StateMemberPitch) = dFz_dP;
  transferFunctionJacobian_(StateMemberRoll, StateMemberRoll) = dFR_dR;
  transferFunctionJacobian_(StateMemberRoll, StateMemberPitch) = dFR_dP;
  transferFunctionJacobian_(StateMemberRoll, StateMemberYaw) = dFR_dY;
  transferFunctionJacobian_(StateMemberPitch, StateMemberRoll) = dFP_dR;
  transferFunctionJacobian_(StateMemberPitch, StateMemberPitch) = dFP_dP;
  transferFunctionJacobian_(StateMemberPitch, StateMemberYaw) = dFP_dY;
  transferFunctionJacobian_(StateMemberYaw, StateMemberRoll) = dFY_dR;
  transferFunctionJacobian_(StateMemberYaw, StateMemberPitch) = dFY_dP;

  FB_DEBUG("Transfer function is:\n" << transferFunction_ <<
           "\nTransfer function Jacobian is:\n" << transferFunctionJacobian_ <<
           "\nProcess noise covariance is:\n" << processNoiseCovariance_ <<
           "\nCurrent state is:\n" << state_ << "\n");

  // (1) Project the state forward: x = Ax (really, x = f(x))
  state_ = transferFunction_ * state_;

  // Handle wrapping
  wrapStateAngles();

  FB_DEBUG("Predicted state is:\n" << state_ <<
           "\nCurrent estimate error covariance is:\n" << estimateErrorCovariance_ << "\n");

  // (2) Project the error forward: P = J * P * J' + Q
  estimateErrorCovariance_ = (transferFunctionJacobian_ *
                              estimateErrorCovariance_ *
                              transferFunctionJacobian_.transpose());
  estimateErrorCovariance_.noalias() += (processNoiseCovariance_ * delta);

  FB_DEBUG("Predicted estimate error covariance is:\n" << estimateErrorCovariance_ <<
           "\n\n--------------------- /Ekf::predict ----------------------\n");
}
void FilterBase::processMeasurement(const Measurement &measurement) { FB_DEBUG("------ FilterBase::processMeasurement (" << measurement.topicName_ << ") ------\n"); double delta = 0.0; // If we've had a previous reading, then go through the predict/update // cycle. Otherwise, set our state and covariance to whatever we get // from this measurement. if (initialized_) { // Determine how much time has passed since our last measurement delta = measurement.time_ - lastMeasurementTime_; FB_DEBUG("Filter is already initialized. Carrying out predict/correct loop...\n" "Measurement time is " << std::setprecision(20) << measurement.time_ << ", last measurement time is " << lastMeasurementTime_ << ", delta is " << delta << "\n"); // Only want to carry out a prediction if it's // forward in time. Otherwise, just correct. if (delta > 0) { validateDelta(delta); predict(delta); // Return this to the user predictedState_ = state_; } correct(measurement); } else { FB_DEBUG("First measurement. Initializing filter.\n"); // Initialize the filter, but only with the values we're using size_t measurementLength = measurement.updateVector_.size(); for (size_t i = 0; i < measurementLength; ++i) { state_[i] = (measurement.updateVector_[i] ? measurement.measurement_[i] : state_[i]); } // Same for covariance for (size_t i = 0; i < measurementLength; ++i) { for (size_t j = 0; j < measurementLength; ++j) { estimateErrorCovariance_(i, j) = (measurement.updateVector_[i] && measurement.updateVector_[j] ? measurement.covariance_(i, j) : estimateErrorCovariance_(i, j)); } } initialized_ = true; } if (delta >= 0.0) { // Update the last measurement and update time. // The measurement time is based on the time stamp of the // measurement, whereas the update time is based on this // node's current ROS time. The update time is used to // determine if we have a sensor timeout, whereas the // measurement time is used to calculate time deltas for // prediction and correction. 
lastMeasurementTime_ = measurement.time_; } FB_DEBUG("------ /FilterBase::processMeasurement (" << measurement.topicName_ << ") ------\n"); }