vector<Point> Widget::findKalmanCenters(vector<Point> dataInput)
{
    vector<Point> kalmanCenters;

    if (dataInput.empty()) {
        errorMsg("No objects to detect!");
        return kalmanCenters;
    }

    kalmanCenters.resize(dataInput.size());
    KalmanFilter *KF;

    // Assumes one measurement per tracked target, i.e. targets.size() == dataInput.size().
    for (unsigned int i = 0; i < targets.size(); i++) {
        KF = targets[i]->KF;

        // Just converting between the data structures used
        Mat_<float> measurement(2, 1);
        measurement(0) = dataInput[i].x;
        measurement(1) = dataInput[i].y;

        Mat estimated;
        Mat prediction = KF->predict();

        if (detectionSucess[i]) {
            // If the detection succeeded, we feed the new measurement into the filter.
            // Calling correct() already incorporates the new measurement.
            estimated = KF->correct(measurement);
        }
        else {
            /* If the measurement failed, we feed the filter back
               with its own previous prediction. */
            Mat_<float> predictedMeasurement(2, 1);
            predictedMeasurement(0) = prediction.at<float>(0);
            predictedMeasurement(1) = prediction.at<float>(1);
            estimated = KF->correct(predictedMeasurement);
            // Copy errorCovPre into errorCovPost [tip from a user on some forum],
            // so the bogus correction does not shrink the uncertainty.
            KF->errorCovPre.copyTo(KF->errorCovPost);
        }

        Point statePt(estimated.at<float>(0), estimated.at<float>(1));
        kalmanCenters[i] = statePt;

        /* There is the measured center and the center the Kalman filter believes
           to be the real one. The Kalman center is a weighted combination of the
           measurements and of the model, given prior knowledge of a random error. */
    }

    return kalmanCenters;
}
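// The loop above assumes each target's KF was configured elsewhere. For
// reference, a minimal sketch of how such a filter could be set up for 2-D
// point tracking with a constant-velocity model (the 4-state / 2-measurement
// layout implied by measurement(2,1) above). makePointTracker and the noise
// magnitudes below are illustrative assumptions, not this project's values:

#include <opencv2/video/tracking.hpp>

// Hypothetical setup helper; state = [x, y, vx, vy], measurement = [x, y].
cv::KalmanFilter* makePointTracker(const cv::Point &start, float dt = 1.0f)
{
    cv::KalmanFilter *KF = new cv::KalmanFilter(4, 2, 0, CV_32F);

    // Constant-velocity transition model A.
    KF->transitionMatrix = (cv::Mat_<float>(4, 4) <<
        1, 0, dt, 0,
        0, 1, 0, dt,
        0, 0, 1,  0,
        0, 0, 0,  1);

    cv::setIdentity(KF->measurementMatrix);                           // H: observe x, y
    cv::setIdentity(KF->processNoiseCov, cv::Scalar::all(1e-4));      // Q (assumed magnitude)
    cv::setIdentity(KF->measurementNoiseCov, cv::Scalar::all(1e-1));  // R (assumed magnitude)
    cv::setIdentity(KF->errorCovPost, cv::Scalar::all(1));            // P

    // Start at the first detection with zero velocity.
    KF->statePost = (cv::Mat_<float>(4, 1) << start.x, start.y, 0, 0);
    return KF;
}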
vector<vector<Point> > Widget::predictFuture(int futureSize)
{
    vector<vector<Point> > future;
    future.resize(targets.size());

    for (unsigned int i = 0; i < targets.size(); i++)
    {
        KalmanFilter *KF = targets[i]->KF;
        KalmanFilter DelfusOracle = myMath::copyKF(*KF);

        /* To see the future, we copy the current filter's state and feed it
           back with its own predictions a fixed number of times. */
        for (int j = 0; j < futureSize; j++) // COMPUTE FUTURE POINTS
        {
            Mat prediction = DelfusOracle.predict();
            Point predictPt(prediction.at<float>(0), prediction.at<float>(1));
            future[i].push_back(predictPt);

            Mat_<float> predictedMeasurement(2, 1);
            predictedMeasurement(0) = prediction.at<float>(0);
            predictedMeasurement(1) = prediction.at<float>(1);

            DelfusOracle.correct(predictedMeasurement);
            // Copy errorCovPre into errorCovPost, following the forum tip.
            // Vidal doesn't like this tip; he says it fools the filter.
            DelfusOracle.errorCovPre.copyTo(DelfusOracle.errorCovPost);
        }
    } // end for each color

    return future;
}
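// myMath::copyKF is used above but not shown here. A minimal sketch of what it
// might look like, assuming it deep-copies every cv::KalmanFilter matrix so the
// throwaway DelfusOracle cannot mutate the real filter (plain Mat assignment
// would share the underlying buffers). Hypothetical implementation, not the
// project's actual code:

KalmanFilter myMath::copyKF(const KalmanFilter &src)
{
    KalmanFilter dst;

    // clone() forces a deep copy of each matrix.
    dst.statePre = src.statePre.clone();
    dst.statePost = src.statePost.clone();
    dst.transitionMatrix = src.transitionMatrix.clone();
    dst.controlMatrix = src.controlMatrix.clone();
    dst.measurementMatrix = src.measurementMatrix.clone();
    dst.processNoiseCov = src.processNoiseCov.clone();
    dst.measurementNoiseCov = src.measurementNoiseCov.clone();
    dst.errorCovPre = src.errorCovPre.clone();
    dst.errorCovPost = src.errorCovPost.clone();
    dst.gain = src.gain.clone();

    return dst;
}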
void Ukf::correct(const Measurement &measurement)
{
    FB_DEBUG("---------------------- Ukf::correct ----------------------\n" <<
             "State is:\n" << state_ <<
             "\nMeasurement is:\n" << measurement.measurement_ <<
             "\nMeasurement covariance is:\n" << measurement.covariance_ << "\n");

    // In our implementation, it may be that after we call predict once, we call correct
    // several times in succession (multiple measurements with different time stamps). In
    // that event, the sigma points need to be updated to reflect the current state.
    if (!uncorrected_)
    {
        // Take the square root of a small fraction of the estimateErrorCovariance_ using LL' decomposition
        weightedCovarSqrt_ = ((STATE_SIZE + lambda_) * estimateErrorCovariance_).llt().matrixL();

        // Compute sigma points

        // First sigma point is the current state
        sigmaPoints_[0] = state_;

        // Next STATE_SIZE sigma points are state + weightedCovarSqrt_[ith column]
        // STATE_SIZE sigma points after that are state - weightedCovarSqrt_[ith column]
        for (size_t sigmaInd = 0; sigmaInd < STATE_SIZE; ++sigmaInd)
        {
            sigmaPoints_[sigmaInd + 1] = state_ + weightedCovarSqrt_.col(sigmaInd);
            sigmaPoints_[sigmaInd + 1 + STATE_SIZE] = state_ - weightedCovarSqrt_.col(sigmaInd);
        }
    }

    // We don't want to update everything, so we need to build matrices that only update
    // the measured parts of our state vector

    // First, determine how many state vector values we're updating
    std::vector<size_t> updateIndices;
    for (size_t i = 0; i < measurement.updateVector_.size(); ++i)
    {
        if (measurement.updateVector_[i])
        {
            // Handle nan and inf values in measurements
            if (std::isnan(measurement.measurement_(i)))
            {
                FB_DEBUG("Value at index " << i << " was nan. Excluding from update.\n");
            }
            else if (std::isinf(measurement.measurement_(i)))
            {
                FB_DEBUG("Value at index " << i << " was inf. Excluding from update.\n");
            }
            else
            {
                updateIndices.push_back(i);
            }
        }
    }

    FB_DEBUG("Update indices are:\n" << updateIndices << "\n");

    size_t updateSize = updateIndices.size();

    // Now set up the relevant matrices
    Eigen::VectorXd stateSubset(updateSize);                              // x (in most literature)
    Eigen::VectorXd measurementSubset(updateSize);                        // z
    Eigen::MatrixXd measurementCovarianceSubset(updateSize, updateSize);  // R
    Eigen::MatrixXd stateToMeasurementSubset(updateSize, STATE_SIZE);     // H
    Eigen::MatrixXd kalmanGainSubset(STATE_SIZE, updateSize);             // K
    Eigen::VectorXd innovationSubset(updateSize);                         // z - Hx
    Eigen::VectorXd predictedMeasurement(updateSize);
    Eigen::VectorXd sigmaDiff(updateSize);
    Eigen::MatrixXd predictedMeasCovar(updateSize, updateSize);
    Eigen::MatrixXd crossCovar(STATE_SIZE, updateSize);
    std::vector<Eigen::VectorXd> sigmaPointMeasurements(sigmaPoints_.size(), Eigen::VectorXd(updateSize));

    stateSubset.setZero();
    measurementSubset.setZero();
    measurementCovarianceSubset.setZero();
    stateToMeasurementSubset.setZero();
    kalmanGainSubset.setZero();
    innovationSubset.setZero();
    predictedMeasurement.setZero();
    predictedMeasCovar.setZero();
    crossCovar.setZero();

    // Now build the sub-matrices from the full-sized matrices
    for (size_t i = 0; i < updateSize; ++i)
    {
        measurementSubset(i) = measurement.measurement_(updateIndices[i]);
        stateSubset(i) = state_(updateIndices[i]);

        for (size_t j = 0; j < updateSize; ++j)
        {
            measurementCovarianceSubset(i, j) = measurement.covariance_(updateIndices[i], updateIndices[j]);
        }

        // Handle negative (read: bad) covariances in the measurement. Rather
        // than exclude the measurement or make up a covariance, just take
        // the absolute value.
        if (measurementCovarianceSubset(i, i) < 0)
        {
            FB_DEBUG("WARNING: Negative covariance for index " << i <<
                     " of measurement (value is " << measurementCovarianceSubset(i, i) <<
                     "). Using absolute value...\n");

            measurementCovarianceSubset(i, i) = ::fabs(measurementCovarianceSubset(i, i));
        }

        // If the measurement variance for a given variable is very
        // near 0 (as in e-50 or so) and the variance for that
        // variable in the covariance matrix is also near zero, then
        // the Kalman gain computation will blow up. Really, no
        // measurement can be completely without error, so add a small
        // amount in that case.
        if (measurementCovarianceSubset(i, i) < 1e-9)
        {
            measurementCovarianceSubset(i, i) = 1e-9;

            FB_DEBUG("WARNING: measurement had very small error covariance for index " << updateIndices[i] <<
                     ". Adding some noise to maintain filter stability.\n");
        }
    }

    // The state-to-measurement function, h, will now be a measurement_size x full_state_size
    // matrix, with ones in the (i, i) locations of the values to be updated
    for (size_t i = 0; i < updateSize; ++i)
    {
        stateToMeasurementSubset(i, updateIndices[i]) = 1;
    }

    FB_DEBUG("Current state subset is:\n" << stateSubset <<
             "\nMeasurement subset is:\n" << measurementSubset <<
             "\nMeasurement covariance subset is:\n" << measurementCovarianceSubset <<
             "\nState-to-measurement subset is:\n" << stateToMeasurementSubset << "\n");

    // (1) Generate sigma points, use them to generate a predicted measurement
    for (size_t sigmaInd = 0; sigmaInd < sigmaPoints_.size(); ++sigmaInd)
    {
        sigmaPointMeasurements[sigmaInd] = stateToMeasurementSubset * sigmaPoints_[sigmaInd];
        predictedMeasurement += stateWeights_[sigmaInd] * sigmaPointMeasurements[sigmaInd];
    }

    // (2) Use the sigma point measurements and predicted measurement to compute a predicted
    // measurement covariance matrix P_zz and a state/measurement cross-covariance matrix P_xz.
    for (size_t sigmaInd = 0; sigmaInd < sigmaPoints_.size(); ++sigmaInd)
    {
        sigmaDiff = sigmaPointMeasurements[sigmaInd] - predictedMeasurement;
        predictedMeasCovar += covarWeights_[sigmaInd] * (sigmaDiff * sigmaDiff.transpose());
        crossCovar += covarWeights_[sigmaInd] * ((sigmaPoints_[sigmaInd] - state_) * sigmaDiff.transpose());
    }

    // (3) Compute the Kalman gain, making sure to use the actual measurement covariance: K = P_xz * (P_zz + R)^-1
    Eigen::MatrixXd invInnovCov = (predictedMeasCovar + measurementCovarianceSubset).inverse();
    kalmanGainSubset = crossCovar * invInnovCov;

    // (4) Apply the gain to the difference between the actual and predicted measurements: x = x + K(z - z_hat)
    innovationSubset = (measurementSubset - predictedMeasurement);

    // (5) Check Mahalanobis distance of innovation
    if (checkMahalanobisThreshold(innovationSubset, invInnovCov, measurement.mahalanobisThresh_))
    {
        // Wrap angles in the innovation
        for (size_t i = 0; i < updateSize; ++i)
        {
            if (updateIndices[i] == StateMemberRoll ||
                updateIndices[i] == StateMemberPitch ||
                updateIndices[i] == StateMemberYaw)
            {
                while (innovationSubset(i) < -PI)
                {
                    innovationSubset(i) += TAU;
                }

                while (innovationSubset(i) > PI)
                {
                    innovationSubset(i) -= TAU;
                }
            }
        }

        state_ = state_ + kalmanGainSubset * innovationSubset;

        // (6) Compute the new estimate error covariance P = P - (K * P_zz * K')
        estimateErrorCovariance_ = estimateErrorCovariance_.eval() -
            (kalmanGainSubset * predictedMeasCovar * kalmanGainSubset.transpose());

        wrapStateAngles();

        // Mark that we need to re-compute sigma points for successive corrections
        uncorrected_ = false;

        FB_DEBUG("Predicted measurement covariance is:\n" << predictedMeasCovar <<
                 "\nCross covariance is:\n" << crossCovar <<
                 "\nKalman gain subset is:\n" << kalmanGainSubset <<
                 "\nInnovation:\n" << innovationSubset <<
                 "\nCorrected full state is:\n" << state_ <<
                 "\nCorrected full estimate error covariance is:\n" << estimateErrorCovariance_ <<
                 "\n\n---------------------- /Ukf::correct ----------------------\n");
    }
}
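// checkMahalanobisThreshold is defined elsewhere in the filter's base class.
// A minimal sketch of such an innovation gate, assuming the threshold is given
// in sigmas so the squared Mahalanobis distance d^2 = nu' * S^-1 * nu is
// compared against nsigmas^2 (not necessarily the project's exact code):

#include <Eigen/Dense>

bool checkMahalanobisThreshold(const Eigen::VectorXd &innovation,
                               const Eigen::MatrixXd &invCovariance,
                               const double nsigmas)
{
    // Squared Mahalanobis distance of the innovation under the innovation covariance.
    double sqMahalanobis = innovation.dot(invCovariance * innovation);
    double threshold = nsigmas * nsigmas;

    // Accept the measurement only if its innovation falls inside the gate.
    return sqMahalanobis < threshold;
}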