// PinnedStartShapeAndRoi: build an ASM start shape from manually pinned
// landmarks, and extract the face ROI (possibly rotated upright).
//
// Rotation and yaw are estimated from the pinned points; the yaw selects
// which model in mods to use.  Because the models are for right-facing
// faces, left-facing faces are handled by flipping the working image and
// shape, then un-flipping the resulting detpars at the end.
void PinnedStartShapeAndRoi( // use the pinned landmarks to init the start shape
    Shape&       startshape,  // out: the start shape (in ROI frame)
    Image&       face_roi,    // out: ROI around face, possibly rotated upright
    DetPar&      detpar_roi,  // out: detpar wrt to face_roi
    DetPar&      detpar,      // out: detpar wrt to img
    Shape&       pinned_roi,  // out: pinned arg translated to ROI frame
    const Image& img,         // in: the image (grayscale)
    const vec_Mod& mods,      // in: a vector of models, one for each yaw range
    const Shape& pinned)      // in: manually pinned landmarks
{
    // estimate in-plane rotation and yaw from a 5-point reduction of the
    // pinned shape, using the first model's mean shape as the reference
    double rot, yaw;
    EstRotAndYawFrom5PointShape(rot, yaw,
                                As5PointShape(pinned, mods[0]->MeanShape_()));
    const EYAW eyaw = DegreesAsEyaw(yaw, NSIZE(mods));
    const int imod = EyawAsModIndex(eyaw, mods); // select ASM model based on yaw
    if (trace_g)
        lprintf("%-6.6s yaw %3.0f rot %3.0f ",
                EyawAsString(eyaw), yaw, rot);
    pinned_roi = pinned;    // use pinned_roi as a temp shape we can change
    Image workimg(img);     // possibly flipped image
    if (IsLeftFacing(eyaw)) // left facing? (our models are for right facing faces)
    {
        // mirror both the shape and the image so the right-facing model applies
        pinned_roi = FlipShape(pinned_roi, workimg.cols);
        FlipImgInPlace(workimg);
    }
    // ABS(imod): a negative index encodes a mirrored (left-facing) model choice
    const Mod* mod = mods[ABS(imod)];
    // align the model's mean shape to the pinned points, then conform it to
    // the model while keeping the pinned landmarks fixed
    startshape = PinMeanShape(pinned_roi, mod->MeanShape_());
    startshape = mod->ConformShapeToMod_Pinned_(startshape, pinned_roi);
    // synthesize a detpar as if a face detector had produced this start shape
    detpar = PseudoDetParFromStartShape(startshape, rot, yaw, NSIZE(mods));
    if (IsLeftFacing(eyaw))
        detpar.rot *= -1; // rotation sign flips with the horizontal mirror
    FaceRoiAndDetPar(face_roi, detpar_roi, workimg, detpar, false);
    // translate both shapes from image coordinates into the ROI frame
    startshape = ImgShapeToRoiFrame(startshape, detpar_roi, detpar);
    pinned_roi = ImgShapeToRoiFrame(pinned_roi, detpar_roi, detpar);
    // following line not strictly necessary because don't actually need eyes/mouth
    InitDetParEyeMouthFromShape(detpar_roi, startshape);
    if (IsLeftFacing(eyaw)) // undo the flip so outputs are wrt the original img
    {
        detpar = FlipDetPar(detpar, img.cols);
        detpar.rot = -detpar.rot;
        // reflect the ROI-frame x about the ROI's vertical centerline
        // (presumably mirroring detpar_roi back to the unflipped frame —
        // NOTE(review): confirm against FaceRoiAndDetPar's conventions)
        detpar_roi.x += 2. * (face_roi.cols/2. - detpar_roi.x);
    }
}
// iteratedTransform: iteratively refine a 2x3 affine transform mapping
// `before` onto `after`.
//
// Starting from a pure rotation of (angle - 1) degrees (angle and epsilon
// are externals defined elsewhere in this file), each iteration warps
// `before` by the accumulated transform, asks findTransform for the
// residual transform to `after`, composes the residual into the
// accumulation, and stops once the residual is within epsilon of identity.
//
// before, after: input images (passed by non-const ref for findTransform;
//                not modified here)
// l:             window/block size forwarded to findTransform
// returns:       2x3 CV_64FC1 affine transform [a b tx; c d ty]
cv::Mat iteratedTransform(cv::Mat& before, cv::Mat& after, int l = 64)
{
    // image dimensions
    const int width = before.cols;
    const int height = before.rows;
    const cv::Size size(width, height);

    // Initial guess: rotation by (angle - 1) degrees, zero translation.
    // Hoisted: the original recomputed cos/sin of the same angle four times.
    const double theta = M_PI * (angle - 1) / 180.0;
    cv::Mat transform(2, 3, CV_64FC1);
    transform.at<double>(0) = cos(theta);
    transform.at<double>(1) = sin(theta);
    transform.at<double>(2) = 0;
    transform.at<double>(3) = -sin(theta);
    transform.at<double>(4) = cos(theta);
    transform.at<double>(5) = 0;
    std::cout << "start transform: " << transform << std::endl;

    double s = 0;
    do
    {
        // Apply the current transform to a copy of the before image.
        // BUGFIX: cv::Mat's constructor takes (rows, cols); the original
        // passed (width, height) swapped.  warpAffine reallocates to `size`
        // anyway, but the corrected order avoids the spurious reallocation.
        cv::Mat workimg(height, width, CV_32FC1);
        cv::warpAffine(before, workimg, transform, size);

        // Determine the remaining (residual) transform.
        cv::Mat newtransform = findTransform(workimg, after, l);
        std::cout << "newtransform: " << newtransform << std::endl;

        // Compose: accumulated = newtransform o transform.
        // Linear index layout of a 2x3 affine: [0 1 2; 3 4 5].
        const double n0 = newtransform.at<double>(0);
        const double n1 = newtransform.at<double>(1);
        const double n2 = newtransform.at<double>(2);
        const double n3 = newtransform.at<double>(3);
        const double n4 = newtransform.at<double>(4);
        const double n5 = newtransform.at<double>(5);
        const double t0 = transform.at<double>(0);
        const double t1 = transform.at<double>(1);
        const double t2 = transform.at<double>(2);
        const double t3 = transform.at<double>(3);
        const double t4 = transform.at<double>(4);
        const double t5 = transform.at<double>(5);
        double u[6];
        u[0] = n0 * t0 + n1 * t3;           // linear part, row 0
        u[1] = n0 * t1 + n1 * t4;
        u[2] = n0 * t2 + n1 * t5 + n2;      // translation, row 0
        u[3] = n3 * t0 + n4 * t3;           // linear part, row 1
        u[4] = n3 * t1 + n4 * t4;
        u[5] = n3 * t2 + n4 * t5 + n5;      // translation, row 1
        for (int i = 0; i < 6; i++)
            transform.at<double>(i) = u[i];
        std::cout << "accumulated: " << transform << std::endl;

        // How close is newtransform to the identity?  Sum of squared
        // deviations of all six coefficients (diagonal terms measured
        // against 1, everything else against 0).
        s = 0;
        for (int i = 0; i < 6; i++)
        {
            double x = newtransform.at<double>(i);
            if (i == 0 || i == 4)
                x = 1 - x;
            s += x * x;
        }
        std::cout << "s = " << s << std::endl;
    } while (s > epsilon);

    return transform;
}