Code Example #1
bool CFaceDetect::detectByAsm( const Mat &faceImage,vector<CFaceRect> &rclist,bool onlybig )
{
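	// Run STASM's automatic face search on a grayscale image and collect one CFaceRect per
	// detected face (face box plus 1x1 rects at the pupils and the top of the top lip).
	// onlybig==true passes multiface=0 so only a single face is searched for.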
	int foundface = 0;
	float landmarks[2 * stasm_NLANDMARKS]; // x,y coords (note the 2)
	//if(!stasm_init("../model",0))
	//{
	//	return ;
	//}
	Mat_<unsigned char> matimage = faceImage;
	//detect face
	if (!stasm_open_image((const char*)matimage.data, faceImage.cols, faceImage.rows, "image",
				onlybig?0:1 /*multiface*/, 10 /*minwidth*/))
	{
		//cout<<"detect face failed !!"<<stasm_lasterr()<<endl;	
		return false;
	}
	Mat_<unsigned char> outimg(faceImage.clone());
	for(;;)
	{
		CFaceRect facerect;
		//find the next face
		if(!stasm_search_auto(&foundface, landmarks))
		{
			return false;
		}

		if(foundface == 0)
		{
			break;
		}

		facerect.noalign = false;
		//force the landmarks inside the image bounds
		stasm_force_points_into_image(landmarks, faceImage.cols, faceImage.rows);
		getAsmPointer(landmarks,facerect.rcface);
		//printLandmarks(landmarks);
		//drawLandmarks(outimg, landmarks);
		//markLandmarks(outimg, landmarks);

		facerect.rcfullface = facerect.rcface;
		facerect.rclefteye = Rect((int)landmarks[L_LPupil * 2],(int)landmarks[L_LPupil * 2 + 1],1,1);
		facerect.rcrighteye = Rect((int)landmarks[L_RPupil * 2],(int)landmarks[L_RPupil * 2 + 1],1,1);
		facerect.rcmouth = Rect((int)landmarks[L_CTopOfTopLip * 2],(int)landmarks[L_CTopOfTopLip * 2 + 1],1,1);
		//cout<<"eye "<<facerect.rclefteye.x<<" "<<facerect.rclefteye.y<<" "<<facerect.rcrighteye.x<<" "<<facerect.rcrighteye.y<<" "<<facerect.rcmouth.x<<" "<<facerect.rcmouth.y<<endl;
		outimg(cvRound(landmarks[L_LPupil*2+1]),cvRound(landmarks[2*L_LPupil]))=255;
		outimg(cvRound(landmarks[L_RPupil*2+1]),cvRound(landmarks[2*L_RPupil]))=255;
		outimg(cvRound(landmarks[L_CTopOfTopLip*2+1]),cvRound(landmarks[2*L_CTopOfTopLip]))=255;
		rclist.push_back(facerect);
	}
	//imwrite("test_stasm_lib_auto.bmp", outimg);
	return true;
}
Code Example #2
File: minimal2.cpp  Project: apprisi/stasm4
int main()
{
    if (!stasm_init("../data", 0 /*trace*/))
        error("stasm_init failed: ", stasm_lasterr());

    static const char* path = "../data/testface.jpg";

    cv::Mat_<unsigned char> img(cv::imread(path, CV_LOAD_IMAGE_GRAYSCALE));

    if (!img.data)
        error("Cannot load", path);

    if (!stasm_open_image((const char*)img.data, img.cols, img.rows, path,
                          1 /*multiface*/, 10 /*minwidth*/))
        error("stasm_open_image failed: ", stasm_lasterr());

    int foundface;
    float landmarks[2 * stasm_NLANDMARKS]; // x,y coords (note the 2)

    int nfaces = 0;
    while (1)
    {
        if (!stasm_search_auto(&foundface, landmarks))
             error("stasm_search_auto failed: ", stasm_lasterr());

        if (!foundface)
            break;      // note break

        // for demonstration, convert from Stasm 77 points to XM2VTS 68 points
        stasm_convert_shape(landmarks, 68);

        // draw the landmarks on the image as white dots
        stasm_force_points_into_image(landmarks, img.cols, img.rows);
        for (int i = 0; i < stasm_NLANDMARKS; i++)
            img(cvRound(landmarks[i*2+1]), cvRound(landmarks[i*2])) = 255;

        nfaces++;
    }
    printf("%s: %d face(s)\n", path, nfaces);
    fflush(stdout);
    cv::imwrite("minimal2.bmp", img);
    cv::imshow("stasm minimal2", img);
    cv::waitKey();

    return 0;
}
Code Example #3
bool FaceAlignment::detectLandmarks(cv::Mat& img, std::vector<std::vector<cv::Point2d> >& points, std::vector<cv::Point2d>& centers) {
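    // Run STASM's automatic search over img; for each detected face append its full landmark
    // list to `points` and the landmark centroid to `centers`. Returns true if at least one
    // face was found.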

    float landmarks[2 * stasm_NLANDMARKS];
    mutex->lock();

    int allowMultiFace = 0;
    int minFaceWidthInpercentOfWidth = 5;
    commonTool.log(QString("###%1###").arg(QString::fromStdString(dataPath.c_str())));
    commonTool.log(QString("##$%1$##").arg(strnlen(dataPath.c_str(),50)));

    int success = stasm_open_image((const char*)img.data, img.cols, img.rows, dataPath.c_str(), allowMultiFace, minFaceWidthInpercentOfWidth);
    if (!success) commonTool.log("Error!!!");

    int foundface = 1;
    bool aFaceWasFound = false;

//    int success =  stasm_search_single(&foundface, landmarks, (char*)img.data, img.cols, img.rows, dataPath.c_str(), dataPath.c_str());
//    if (!success) commonTool.log("Error!!!");

    while (foundface) {
        success =  stasm_search_auto(&foundface, landmarks);
        if (!success) commonTool.log("Error when calling stasm_search_auto");
        //commonTool.log(QString("FoundFace --> %1").arg(foundface));
        //if (!success || foundface == 0) {
        //    return false;
        //}
        if (foundface) {
            aFaceWasFound = true;
            int numberOfPoint = stasm_NLANDMARKS;
            std::vector<cv::Point2d> landmarkList;
            cv::Point2d center = cv::Point2d(0.0, 0.0);
            for (int i = 0; i < numberOfPoint; i++) {
                cv::Point2d p = cv::Point2d(landmarks[i*2], landmarks[i*2+1]);
                landmarkList.push_back(p);
                center += p;
            }
            points.push_back(landmarkList);
            center = cv::Point2d(center.x/numberOfPoint, center.y/numberOfPoint);
            centers.push_back(center);
        }
    }
    mutex->unlock();
    return aFaceWasFound;
}
Code Example #4
File: py_wrapper.cpp  Project: ananya77041/PyStasm
static PyObject* Py_search_auto(
	PyObject*	self,
	PyObject*	)
{
	int foundface;
	float* landmarks;

	landmarks = new float[stasm_NLANDMARKS * 2];

	if (!stasm_search_auto(&foundface, landmarks))
	{
		PyErr_SetString(StasmException, stasm_lasterr());
		delete[] landmarks;
		return NULL;
	}

	int num_landmarks = foundface ? stasm_NLANDMARKS : 0;
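	// landmarks_to_PyArray is this wrapper's helper that converts the float buffer into a
	// Python array object; an empty array is returned when no face was found.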
	return landmarks_to_PyArray(landmarks, num_landmarks);
}
Code Example #5
File: stasm_main.cpp  Project: apprisi/stasm4
static void ProcessImg(
    const char* imgpath) // in
{
    Image img(cv::imread(imgpath, CV_LOAD_IMAGE_GRAYSCALE));
    if (!img.data)
        Err("Cannot load %s", imgpath);
    if (!stasm_open_image((const char*)img.data, img.cols, img.rows, imgpath,
                          multiface_g, minwidth_g))
        Err("stasm_open_image failed:  %s", stasm_lasterr());

    CImage cimg;     // color version of image
    if (writeimgs_g) // actually need the color image?
        cvtColor(img, cimg, CV_GRAY2BGR);
    int nfaces = 0;
    while (1)
    {
        if (trace_g && nfaces > 0 && multiface_g)
            stasm_printf("\n%d: ", nfaces);

        int foundface;
        float landmarks[2 * stasm_NLANDMARKS]; // x,y coords
        if (!stasm_search_auto(&foundface, landmarks))
            Err("stasm_search_auto failed: %s", stasm_lasterr());

        if (!foundface)
            break; // note break

        ProcessFace(cimg, landmarks, nfaces, Base(imgpath));
        nfaces++;
    }
    if (trace_g)
        lprintf("\n");
    if (writeimgs_g && nfaces)
    {
        // write as a bmp not as a jpg because don't want blurred shape lines
        char newpath[SLEN]; sprintf(newpath, "%s_stasm.bmp", Base(imgpath));
        lprintf("%s ", newpath);
        if (!cv::imwrite(newpath, cimg))
            Err("Could not write %s", newpath);
    }
    lprintf("%d face%s\n", nfaces, plural(nfaces));
}
Code Example #6
bool CFaceDetect::detectByAsmMarks( const Mat &faceImage,vector<float *> &vlandmarks,bool onlybig )
{
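	// Variant of detectByAsm that returns the raw landmark buffers instead of rectangles.
	// Each detected face contributes one new[]-allocated float array; the caller owns the
	// pointers pushed into vlandmarks and is responsible for delete[]-ing them.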
	int foundface = 0;
	Mat_<unsigned char> matimage = faceImage;
	//detect face
	if (!stasm_open_image((const char*)matimage.data, faceImage.cols, faceImage.rows, "image",
				onlybig?0:1 /*multiface*/, 10 /*minwidth*/))
	{
		return false;
	}
	Mat_<unsigned char> outimg(faceImage.clone());
	for(;;)
	{
		CFaceRect facerect;
		float *landmarks = new float[2 * stasm_NLANDMARKS]; // x,y coords (note the 2)
		//find the next face
		if(!stasm_search_auto(&foundface, landmarks))
		{
			delete[] landmarks; // don't leak the buffer on failure
			return false;
		}

		if(foundface == 0)
		{
			delete[] landmarks; // no face found, so the buffer is never handed to the caller
			break;
		}

		facerect.noalign = false;
		//force the landmarks inside the image bounds
		stasm_force_points_into_image(landmarks, faceImage.cols, faceImage.rows);
		//printLandmarks(landmarks);
		//drawLandmarks(outimg, landmarks);
		//markLandmarks(outimg, landmarks);
		//imshow("tmp", outimg);

		vlandmarks.push_back(landmarks);
	}
	return true;
}
Code Example #7
File: qstasm.cpp  Project: pi-null-mezon/Interview
void QStasm::search_points(const cv::Mat &image, const cv::RotatedRect rRect)
{   
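    // Convert the frame to grayscale if needed, run STASM's automatic search on it, and emit
    // the landmarks, the inter-pupil distance and the per-frame processing time via Qt signals.
    // __updateHistory / __getAveragePoints are project helpers (their names suggest temporal
    // smoothing of the landmark positions across frames).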
    cv::Mat temp;
    if(image.channels() == 3)
        cv::cvtColor(image, temp, CV_BGR2GRAY);
    else if(image.channels() == 1)
        temp = image;   
    stasm_enroll_face_image((char*)temp.data, temp.cols, temp.rows);
    int facesFound = 0;
    if( stasm_search_auto(&facesFound, pt_landmarks)) {
        __updateHistory(pt_landmarks, temp.cols);
        __getAveragePoints(pt_landmarks);
        emit landmarksUpdated(image, pt_landmarks, 2 * stasm_NLANDMARKS);
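        // indices 38 and 39 correspond to the left and right pupil landmarks in Stasm's 77-point scheme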
        double eyesDistance = std::sqrt((pt_landmarks[2*38] - pt_landmarks[2*39])*(pt_landmarks[2*38] - pt_landmarks[2*39]) +
                              (pt_landmarks[2*38+1] - pt_landmarks[2*39+1])*(pt_landmarks[2*38+1] - pt_landmarks[2*39+1]));
        emit eyesdistanceUpdated( eyesDistance );
        if(compositeFlag)
            __makeComposition(pt_landmarks, rRect);
    }

    m_frametime = (cv::getTickCount() -  m_time) * 1000.0 / cv::getTickFrequency(); // result is calculated in milliseconds
    m_time = cv::getTickCount();
    emit frametimeUpdated(m_frametime);
}