Example #1
    // JNI entry point: runs the Stasm landmark search on the image file named
    // by |jpath| and returns a Java int[] of 2*stasm_NLANDMARKS interleaved
    // x,y coordinates. Errors are signalled through sentinel values in the
    // first two slots: -1 image load failed, -2 stasm_search_single failed,
    // -3 no face was found.
    JNIEXPORT jintArray JNICALL Java_com_tuwien_snowwhite_FacialFeatureActivity_FindFaceLandmarks(
        JNIEnv* env, jobject, jstring jpath)
    {
        clock_t StartTime = clock();
        jintArray arr = env->NewIntArray(2*stasm_NLANDMARKS);
        jint *out = env->GetIntArrayElements(arr, NULL);

        // BUG FIX: was `static const char* path` -- the static served no
        // purpose and is not thread-safe; a plain local is correct.
        const char* path = env->GetStringUTFChars(jpath, NULL);
        cv::Mat_<unsigned char> img(cv::imread(path, CV_LOAD_IMAGE_GRAYSCALE));

        if (!img.data)
        {
            __android_log_print(ANDROID_LOG_ERROR, "FacialFeatureActivity_FindFaceLandmarks", "Cannot load image file %s", path);
            out[0] = -1;
            out[1] = -1;
            // BUG FIX: the UTF chars were never released, leaking on every
            // call. (img.release() removed: the Mat destructor handles it.)
            env->ReleaseStringUTFChars(jpath, path);
            env->ReleaseIntArrayElements(arr, out, 0);
            return arr;
        }

        int foundface;
        float landmarks[2 * stasm_NLANDMARKS]; // x,y coords

        if (!stasm_search_single(&foundface, landmarks, (const char*)img.data, img.cols, img.rows, path,
                                 "/data/data/com.tuwien.snowwhite/app_stasmdata/"))
        {
            __android_log_print(ANDROID_LOG_ERROR, "FacialFeatureActivity_FindFaceLandmarks", "Error in stasm_search_single %s", stasm_lasterr());
            out[0] = -2;
            out[1] = -2;
            env->ReleaseStringUTFChars(jpath, path);  // BUG FIX: was leaked
            env->ReleaseIntArrayElements(arr, out, 0);
            return arr;
        }

        if (!foundface)
        {
            __android_log_print(ANDROID_LOG_WARN, "FacialFeatureActivity_FindFaceLandmarks", "no face found");
            out[0] = -3;
            out[1] = -3;
            env->ReleaseStringUTFChars(jpath, path);  // BUG FIX: was leaked
            env->ReleaseIntArrayElements(arr, out, 0);
            return arr;
        } else {
            // Round the float coordinates into the Java int array.
            for (int i = 0; i < stasm_NLANDMARKS; i++)
            {
                out[2*i]   = cvRound(landmarks[2*i]);
                out[2*i+1] = cvRound(landmarks[2*i+1]);
            }
        }
        double TotalAsmTime = double(clock() - StartTime) / CLOCKS_PER_SEC;
        __android_log_print(ANDROID_LOG_INFO, "FacialFeatureActivity_FindFaceLandmarks", "Stasm Ver:%s Img:%dx%d ---> Time:%.3f secs.", stasm_VERSION, img.cols, img.rows, TotalAsmTime);

        env->ReleaseStringUTFChars(jpath, path);  // BUG FIX: was leaked
        env->ReleaseIntArrayElements(arr, out, 0);
        return arr;
    }
// Analyzes an image trying to detect a hand. (Original comment wrongly said
// "vehicle".) Runs the cascade detector on |frame|; if a hand is detected,
// fits the Stasm landmarks and draws them as a green polyline on |frame|.
// Returns true only when landmarks were successfully fitted.
bool detect_hand(cv::Mat& frame){
	// Landmark buffer: interleaved x,y pairs -- hence the factor of 2.
	float puntos_extraidos[2*stasm_NLANDMARKS];
	std::vector<cv::Rect> detecciones;
	cv::Mat gray_frame;
	bool salida = false;  // BUG FIX: was `int salida = false;`

	// Both the cascade and Stasm work on a grayscale image.
	cv::cvtColor(frame, gray_frame, cv::COLOR_BGR2GRAY);


	// Run the detector on the image, accepting only rectangles larger than
	// half of the image.
	hand_detector.detectMultiScale(
		gray_frame,
		detecciones,
		1.5,
		3,
		0|cv::CASCADE_SCALE_IMAGE,
		frame.size()/2,
		frame.size());

	// If any objects were detected.
	if(detecciones.size() > 0){
		int mano_encontrada;  // stasm_search_single wants an int*
		std::cout << "Mano encontrada en "<< detecciones[0] << std::endl;
		cv::rectangle(frame, detecciones[0], cv::Scalar(0,0,255), 4);

		if(!stasm_search_single(
			&mano_encontrada,
			puntos_extraidos,
			(char*)gray_frame.data,
			gray_frame.size().width,
			gray_frame.size().height,
			"path_prueba",
			"../data")){
				std::cout << "Puntos no encontrados" << std::endl;
		}
		// BUG FIX: the search can succeed with mano_encontrada == 0, in
		// which case the landmark buffer is not meaningful; the original
		// still reported success and drew the (invalid) points.
		else if(mano_encontrada){
			salida = true;

			// Draw the polyline: a dot at each landmark plus a segment
			// connecting consecutive landmarks.
			cv::Point2f p1, p2;
			p1 = cv::Point2f(puntos_extraidos[0],puntos_extraidos[1]);
			cv::circle(frame, p1, 1, cv::Scalar(0,255,0), 3);


			for(int i=2; i<stasm_NLANDMARKS*2; i+=2){
				p2 = cv::Point2f(puntos_extraidos[i],puntos_extraidos[i+1]);
				cv::circle(frame, p2, 1, cv::Scalar(0,255,0), 3);
				cv::line(frame, p1, p2, cv::Scalar(0,255,0));
				p1 = p2;
			}// End for.
		}// End else.
	}// End if.

	return salida;
}// End 'detect_hand'.
Example #3
// Minimal Stasm demo: load an image (path taken from argv[1], or a default),
// locate the face landmarks, paint each one as a white pixel, then save the
// result to minimal.bmp and display it.
int main(int argc, char *argv[])
{
    const std::string path = (argc > 1) ? argv[1] : "../data/testface.jpg";

    cv::Mat_<unsigned char> img(cv::imread(path, CV_LOAD_IMAGE_GRAYSCALE));
    if (!img.data)
    {
        printf("Cannot load %s\n", path.c_str());
        exit(1);
    }
    cv::resize(img, img, cv::Size(100, 100));

    float landmarks[2 * stasm_NLANDMARKS]; // interleaved x,y coordinates
    int foundface;

    if (!stasm_search_single(&foundface, landmarks,
                             (const char*)img.data, img.cols, img.rows,
                             path.c_str(), "../data"))
    {
        printf("Error in stasm_search_single: %s\n", stasm_lasterr());
        exit(1);
    }

    if (foundface)
    {
        // Clamp the points into the image, then whiten each landmark pixel
        // (the image is monochrome). Note img(row, col) == img(y, x).
        stasm_force_points_into_image(landmarks, img.cols, img.rows);
        for (int i = 0; i < 2 * stasm_NLANDMARKS; i += 2)
            img(cvRound(landmarks[i + 1]), cvRound(landmarks[i])) = 255;
    }
    else
        printf("No face found in %s\n", path.c_str());

    cv::imwrite("minimal.bmp", img);
    cv::imshow("stasm minimal", img);
    cv::waitKey();
    return 0;
}
Example #4
// Fits Stasm face landmarks on the grayscale image |img|, writing the x,y
// pairs into |landmarks| (caller-allocated; presumably 2*stasm_NLANDMARKS
// floats -- TODO confirm against callers). Returns false when the image is
// empty or the search fails; returns true otherwise, even when no face was
// found (the caller must rely on the log output for that case).
// NOTE(review): |add_boundary_points| is accepted but never used here.
bool face_points(cv::Mat* img,float* landmarks, bool add_boundary_points = true) {
	// Debug path handed to Stasm (used for its diagnostics only).
	const char* path = "/data/data/org.stasmdemo.android/app_stasm/testface.jpg";
//	cv::Mat img=cv::imread(imgpath,CV_LOAD_IMAGE_GRAYSCALE);// Note!! the img conversion still needs to be completed here
	if(!img->data){
		aprintf("-------stasm -------------  img load fail");
		return false;
	}
	int foundface;
	// NOTE(review): `aprintf` and `len` are project helpers not visible
	// here; printing a pointer with %i truncates on 64-bit -- presumably
	// debug-only logging. Verify if this matters.
	aprintf("locator face_points landmarks : %i ptr : %i",len(landmarks),landmarks);
	if(!stasm_search_single(&foundface,landmarks,(const char*)img->data,img->cols,img->rows,path,"/data/data/org.stasmdemo.android/app_stasm/")){
		aprintf("--------stasm------------error in stasm search single");
		return false;
	}
	aprintf("locator face_points landmarks : %i ptr : %i",len(landmarks),landmarks);
	if(!foundface){
		aprintf("-------stasm-------------no face found");
	}else{
		// Clamp any out-of-image landmarks so later drawing stays in bounds.
		stasm_force_points_into_image(landmarks,img->cols,img->rows);
	}
	aprintf("locator face_points landmarks : %i ptr : %i",len(landmarks),landmarks);
	return true;
	}
Example #5
// Python binding for stasm_search_single().
// Python signature: search_single(image, debugpath="", datadir=<default>).
// Returns the landmarks converted to a Python array (zero landmarks when no
// face is found); raises TypeError for a bad image object and StasmException
// when the Stasm search itself fails.
static PyObject* Py_search_single(
	PyObject*	self,
	PyObject*	args,
	PyObject*	kwargs)
{
	PyObject*	img_obj;
	const char*	debugpath	= "";
	const char*	datadir		= PyStr_AS_STRING(datadir_default);

	static const char* kwlist[] = { "image", "debugpath", "datadir", NULL };

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|ss:search_single",
					const_cast<char**>(kwlist), &img_obj,
					&debugpath, &datadir))
		return NULL;

	// Extract raw grayscale pixel data and dimensions from the Python object.
	int width, height;
	const char* img_data = PyArray_to_image(img_obj, &width, &height);
	if (img_data == NULL)
	{
		PyErr_SetString(PyExc_TypeError, imarray_error);
		return NULL;
	}

	int foundface;
	float* landmarks = new float[stasm_NLANDMARKS * 2];

	if (!stasm_search_single(&foundface, landmarks, img_data, width,
				 height, debugpath, datadir))
	{
		PyErr_SetString(StasmException, stasm_lasterr());
		delete[] landmarks;
		return NULL;
	}

	// NOTE(review): the error path delete[]s |landmarks| but the success
	// path does not -- this leaks unless landmarks_to_PyArray() takes
	// ownership of the buffer. Its implementation is not visible here;
	// confirm ownership before changing this.
	int landmarks_found = foundface ? stasm_NLANDMARKS : 0;
	return landmarks_to_PyArray(landmarks, landmarks_found);
}
// Programa principal.
int main(int argc, char** argv){

	// Abrimos el dispositivo de video por defecto.
	cv::VideoCapture camera(0);
	if(!camera.isOpened()){
		std::cerr << "No se ha podido abrir la video-cámara.\n";
		return -1;
	}
	
	int cara_encontrada = 0;
	
	// Mientras nos se presione Esc, capturamos frames.
	while(true){
		cv::Mat frame, grey_frame;
		camera >> frame;
		
		cv::cvtColor(frame, grey_frame, cv::COLOR_BGR2GRAY);
		stasm_search_single(
			&cara_encontrada,
			puntos_extraidos,
			(char*)grey_frame.data,
			grey_frame.size().width,
			grey_frame.size().height,
			"path",
			"../data");
			
		if(cara_encontrada)
				pinta(frame);

		cv::imshow("video stream", frame);
		if(cv::waitKey(30) >= 0)
			break;
	}
	
	return 0;
}