const Shape ShapeMod::ConformShapeToMod_( // return shape conformed to shape model
    VEC&         b,      // io: eigvec weights from previous iters of ASM
    const Shape& shape,  // in: shape suggested by the descriptor models
    int          ilev)   // in: pyramid level (0 is full size)
const
{
    // Initialized on first call only (static for efficiency).
    static const VEC pointweights(PointWeights());

    // Scale the shape model down to the current pyramid level.
    const Shape lev_meanshape(meanshape_ * GetPyrScale(ilev));
    const VEC   lev_eigvals(eigvals_ / pow(SQ(PYR_RATIO), ilev));

    Shape conformed(ConformShapeToMod(b, shape,
                                      lev_meanshape, lev_eigvals,
                                      eigvecs_, eigvecsi_, bmax_,
                                      pointweights));

    JitterPointsAt00InPlace(conformed); // jitter points at 0,0 if any

    // Shape hacks are permitted only at the coarse pyramid levels.
    if (ilev >= SHAPEHACK_MINPYRLEV)
        ApplyShapeModelHacks(conformed, hackbits_);

    return conformed;
}
// Given a face rectangle already stored in detpar, derive everything else
// needed to start the ASM search: the face ROI image, the detector
// parameters relative to that ROI, and the start shape.
//
// Note on ordering: the ROI/eye/mouth detection may be performed twice --
// if the face rotation was unknown and is estimated here from the eye
// angle, the ROI is re-extracted upright and the detectors are re-run.

static void StartShapeAndRoi(    // we have the facerect, now get the rest
    Shape&         startshape,   // out: the start shape we are looking for
    Image&         face_roi,     // out: ROI around face, possibly rotated upright
    DetPar&        detpar_roi,   // out: detpar wrt to face_roi
    DetPar&        detpar,       // io:  detpar wrt to img (has face rect on entry)
    const Image&   img,          // in:  the image (grayscale)
    const vec_Mod& mods)         // in:  a vector of models, one for each yaw range
                                 //      (use only estart, and meanshape)
{
    PossiblySetRotToZero(detpar.rot);      // treat small rots as zero rots

    FaceRoiAndDetPar(face_roi, detpar_roi, // get ROI around face
                     img, detpar, false);

    DetectEyesAndMouth(detpar_roi,         // use OpenCV eye and mouth detectors
                       face_roi);

    // Some face detectors return the face rotation, some don't (in
    // the call to NextFace_ just made via NextStartShapeAndRoi).
    // If we don't have the rotation, then estimate it from the eye
    // angle, if the eyes are available.

    if (!Valid(detpar.rot))                // don't have the face rotation?
    {
        detpar_roi.rot = EyeAngle(detpar_roi);
        if (!Valid(detpar_roi.rot))        // eye angle not available?
            detpar_roi.rot = 0;            // fall back to an unrotated face
        PossiblySetRotToZero(detpar_roi.rot);
        detpar.rot = detpar_roi.rot;       // propagate estimate back to image-space detpar
        if (detpar.rot != 0)
        {
            // face is rotated: rotate ROI and re-get the eyes and mouth
            // TODO: Prevent bogus OpenCV assert fail face_roi.data == img.data.
            face_roi = Image(0,0);         // release ROI so re-extraction gets fresh storage
            FaceRoiAndDetPar(face_roi, detpar_roi,
                             img, detpar, false);
            DetectEyesAndMouth(detpar_roi, // use OpenCV eye and mouth detectors
                               face_roi);
        }
    }
    TraceEyesMouth(face_roi, detpar_roi);

    // Same message either way; trace_g selects verbose vs. log-only output.
    if (trace_g)
        lprintf("%-6.6s yaw %3.0f rot %3.0f ",
                EyawAsString(detpar_roi.eyaw), detpar_roi.yaw, detpar_roi.rot);
    else
        logprintf("%-6.6s yaw %3.0f rot %3.0f ",
                  EyawAsString(detpar_roi.eyaw), detpar_roi.yaw, detpar_roi.rot);

    // select an ASM model based on the face's yaw
    // (ABS: a negative index signifies a left-facing yaw -- the mirrored
    //  model at the positive index is used; the ROI is flipped below)
    const Mod* mod = mods[ABS(EyawAsModIndex(detpar_roi.eyaw, mods))];

    const ESTART estart = mod->Estart_();  // how to align the start shape

    startshape = StartShapeFromDetPar(detpar_roi,
                                      face_roi,
                                      mod->MeanShape_(),
                                      estart);

    detpar.lex = detpar_roi.lex;           // copy left-eye x back to image-space detpar

    if (IsLeftFacing(detpar_roi.eyaw))
        FlipImgInPlace(face_roi);          // mirror ROI so the model sees a right-facing face

    JitterPointsAt00InPlace(startshape);   // jitter points at 0,0 if any
}