Warp MT::Lucas_Kanade(Warp warp) {
	// Gauss-Newton refinement of `warp` over the fine sample set (OpenCV Matx variant).
	// Each iteration accumulates a robust-weighted gradient and Hessian, solves for the
	// parameter step via SVD, and stops once the translational step is small enough.
	for (int iter = 0; iter < max_iteration; ++iter) {
		Matx61f grad;            // accumulated J^T r
		Matx<float, 6, 6> hess;  // accumulated Gauss-Newton J^T J
		grad = 0.0f;
		hess = 0.0f;
		float err_sum = 0.0f;
		for (int s = 0; s < fine_samples.size(); ++s) {
			Matx<float, L4, 1> residual(fine_model.ptr<float>(s)), sampled;
			Matx<float, 2, L4> dFeat;                          // feature gradient at the warped point
			Matx<float, 2, 6> dWarp = warp.gradient(fine_samples[s]);
			Point2f q = warp.transform2(fine_samples[s]);
			feature.gradient4(q.x, q.y, sampled.val, dFeat.val, dFeat.val + L4);
			// residual = model template minus currently sampled feature
			residual -= sampled;
			float e = sigmoid(residual.dot(residual));
			err_sum += e;
			// derivative of the sigmoid robustifier acts as a per-sample weight
			float wgt = sigmoid_factor * e * (1.0f - e);
			grad += wgt * (dWarp.t() * (dFeat * residual));
			hess += wgt * (dWarp.t() * (dFeat * dFeat.t()) * dWarp);
		}
		err_sum = err_sum / fine_samples.size();
		Matx61f delta;
		solve(hess, grad, delta, DECOMP_SVD);
		warp.steepest(delta);
		// converged when the translation part of the step is tiny (skip first iterations)
		if (iter > 1 && delta(3) * delta(3) + delta(4) * delta(4) + delta(5) * delta(5) < translate_eps) {
			if (log != NULL)
				(*log) << "\terror in iteration " << iter << " = " << err_sum << endl;
			break;
		}
	}
	return warp;
}
Warp MT::Lucas_Kanade(Warp warp) {
	// Gauss-Newton refinement of `warp` (Eigen variant).
	// See "Lucas-Kanade 20 Years On: A Unifying Framework".
	float prev_err = 1.0f;
	for (int iter = 0; iter < max_iteration; ++iter) {
		++number_iteration;
		Matrix<float, 6, 1> grad = Matrix<float, 6, 1>::Constant(0.0f);
		Matrix<float, 6, 6> hess = Matrix<float, 6, 6>::Constant(0.0f);
		float err = 0.0f;
		for (int s = 0; s < fine_samples.size(); ++s) {
			Matrix<float, 2, 6> dWarp;
			Vector2f q = warp.gradient(fine_samples[s], dWarp);
			Vector32f sampled;
			Matrix<float, 32, 2> dFeat;
			feature.gradient4(q.x(), q.y(), sampled.data(), dFeat.col(0).data(), dFeat.col(1).data());
			// sampled becomes (current feature - model); negated below to form the residual
			sampled -= fine_model.col(s);
			float e = sigmoid(sampled.squaredNorm());
			// derivative of the sigmoid robustifier acts as a per-sample weight
			float wgt = sigmoid_factor * e * (1.0f - e);
			grad += wgt * (dWarp.transpose() * (dFeat.transpose() * -sampled));
			// helper accumulates only the upper triangle of J^T J; mirrored after the loop
			hessian(hess, wgt, dWarp, dFeat);
			err += e;
		}
		err = err / fine_samples.size();
		hess.triangularView<Lower>() = hess.transpose();
		Matrix<float, 6, 1> delta = hess.fullPivHouseholderQr().solve(grad);
		warp.steepest(delta);
		if (log != NULL)
			(*log) << err << " ";
		// converged when the translation step is tiny AND the error stopped improving
		if (iter > 1 && delta.segment<3>(3).squaredNorm() < translate_eps && prev_err - err < error_eps)
			break;
		prev_err = err;
	}
	if (log != NULL)
		(*log) << endl;
	return warp;
}