/**
 * Load lens calibration from XML files and (re)build the undistortion
 * maps for this camera.
 *
 * @param cc             Camera whose mapx/mapy/frame3chUndistort are updated.
 * @param intrinsicsFile Path to the camera-matrix XML file.
 * @param distortionFile Path to the distortion-coefficients XML file.
 *
 * Prints a warning to stderr when either file is missing; in that case the
 * old maps are released and left NULL.
 */
void camera_control_read_calibration(CameraControl* cc,
        char* intrinsicsFile, char* distortionFile)
{
    CvMat *intrinsic = (CvMat*) cvLoad(intrinsicsFile, 0, 0, 0);
    CvMat *distortion = (CvMat*) cvLoad(distortionFile, 0, 0, 0);

    /* Drop maps from a previous calibration before rebuilding. */
    if (cc->mapx) {
        cvReleaseImage(&cc->mapx);
    }
    if (cc->mapy) {
        cvReleaseImage(&cc->mapy);
    }

    if (intrinsic && distortion) {
        if (!cc->frame3chUndistort) {
            cc->frame3chUndistort = cvCloneImage(
                    camera_control_query_frame(cc));
        }

        int width, height;
        get_metrics(&width, &height);

        cc->mapx = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
        cc->mapy = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);

        cvInitUndistortMap(intrinsic, distortion, cc->mapx, cc->mapy);
    } else {
        fprintf(stderr, "Warning: No lens calibration files found.\n");
    }

    /* Resolves the old TODO: the matrices are only needed to build the
     * maps, so release them here.  Releasing outside the success branch
     * also covers the case where only one of the two files loaded. */
    if (intrinsic) {
        cvReleaseMat(&intrinsic);
    }
    if (distortion) {
        cvReleaseMat(&distortion);
    }
}
// инициализация данных для устранения дисторсии void BaseVideoCapture::initUndistortion(const char* _intrinsics, const char* _distortion) { if(!frame) return; if(!_intrinsics || !_distortion) { printf("[!][BaseVideoCapture] Error: empty filename!\n"); return; } // // загрузка из файлов // // калибровочные коэффициенты intrinsic = (CvMat*)cvLoad( _intrinsics ); // коэффициенты дисторсии distortion = (CvMat*)cvLoad( _distortion ); mapx = cvCreateImage( cvGetSize( frame ), IPL_DEPTH_32F, 1 ); mapy = cvCreateImage( cvGetSize( frame ), IPL_DEPTH_32F, 1 ); // // создание карты для устранения дисторсии cvInitUndistortMap( intrinsic, distortion, mapx, mapy ); // картинка для сохранения исправленной картинки undist = cvCloneImage(frame); }
bool ProjectionModel::loadData() { if(intrinsic_matrix != 0) { cvReleaseMat(&intrinsic_matrix); } if(distortion_coeffs != 0) { cvReleaseMat(&distortion_coeffs); } string intrinsicsPath = path; intrinsicsPath.append("intrinsics.xml"); string distortionPath = path; distortionPath.append("distortion.xml"); intrinsic_matrix = (CvMat*)cvLoad(intrinsicsPath.c_str()); distortion_coeffs = (CvMat*)cvLoad(distortionPath.c_str()); if(intrinsic_matrix == 0) { cout << path.append("intrinsics.xml").c_str() << " not found" << endl; return false; } if(distortion_coeffs == 0) { cout << path.append("distortion.xml").c_str() << " not found" << endl; return false; } return true; }
int main(int argc, char* argv[]) { CvHaarClassifierCascade *pCascadeFrontal = 0, *pCascadeProfile = 0; CvMemStorage *pStorage = 0; CvSeq *pFaceRectSeq; int i; IplImage *pInpImg = cvLoadImage("D:/²âÊÔ/test6.2/6.jpg", CV_LOAD_IMAGE_COLOR); pStorage = cvCreateMemStorage(0); pCascadeFrontal = (CvHaarClassifierCascade *) cvLoad ("D:/²âÊÔ/test6.2/haarcascade/haarcascade_frontalface_default.xml",0,0,0); pCascadeProfile = (CvHaarClassifierCascade *) cvLoad ("D:/²âÊÔ/test6.2/haarcascade/haarcascade_profileface.xml",0,0,0); if (!pInpImg || !pStorage || !pCascadeFrontal || !pCascadeProfile) { printf("L'initilisation a echoue"); exit(-1); } cvNamedWindow("Fenetre de Haar", CV_WINDOW_NORMAL); cvShowImage("Fenetre de Haar", pInpImg); cvWaitKey(50); pFaceRectSeq = cvHaarDetectObjects (pInpImg, pCascadeFrontal, pStorage,1.1,3,CV_HAAR_DO_CANNY_PRUNING,cvSize(0, 0)); for (i=0 ; i < (pFaceRectSeq ? pFaceRectSeq->total : 0) ; i++) { CvRect* r = (CvRect*)cvGetSeqElem(pFaceRectSeq, i); CvPoint pt1 = { r->x, r->y }; CvPoint pt2 = { r->x + r->width, r->y + r->height }; cvRectangle(pInpImg, pt1, pt2, CV_RGB(0,255,0), 3, 4, 0); cvSetImageROI(pInpImg, *r); cvSmooth(pInpImg, pInpImg, CV_GAUSSIAN, 5, 3); cvResetImageROI(pInpImg); } cvShowImage("Fenetre de Haar", pInpImg); cvWaitKey(1); //²àÁ³ pFaceRectSeq = cvHaarDetectObjects (pInpImg, pCascadeProfile, pStorage, 1.4,3,CV_HAAR_DO_CANNY_PRUNING,cvSize(0, 0)); for (i=0 ; i < (pFaceRectSeq ? 
pFaceRectSeq->total : 0) ; i++) { CvRect* r = (CvRect*)cvGetSeqElem(pFaceRectSeq, i); CvPoint pt1 = { r->x, r->y }; CvPoint pt2 = { r->x + r->width, r->y + r->height }; cvRectangle(pInpImg, pt1, pt2, CV_RGB(255,165,0), 3, 4, 0); cvSetImageROI(pInpImg, *r); cvSmooth(pInpImg, pInpImg, CV_GAUSSIAN, 5, 3); cvResetImageROI(pInpImg); } cvShowImage("Fenetre de Haar", pInpImg); cvWaitKey(0); cvDestroyWindow("Fenetre de Haar"); cvReleaseImage(&pInpImg); if (pCascadeFrontal) cvReleaseHaarClassifierCascade(&pCascadeFrontal); if (pCascadeProfile) cvReleaseHaarClassifierCascade(&pCascadeProfile); if (pStorage) cvReleaseMemStorage(&pStorage); }
/* Load lens calibration from XML files and rebuild the undistortion maps.
 * PSMove variant: camera_control_query_frame takes extra out-params here.
 * NOTE: this definition is truncated in the visible chunk — the else
 * branch (and function end) continue past this point. */
void camera_control_read_calibration(CameraControl* cc,
        char* intrinsicsFile, char* distortionFile)
{
    CvMat *intrinsic = (CvMat*) cvLoad(intrinsicsFile, 0, 0, 0);
    CvMat *distortion = (CvMat*) cvLoad(distortionFile, 0, 0, 0);

    /* Drop any maps from a previous calibration before rebuilding. */
    if (cc->mapx) {
        cvReleaseImage(&cc->mapx);
    }
    if (cc->mapy) {
        cvReleaseImage(&cc->mapy);
    }

    if (intrinsic && distortion) {
        if (!cc->frame3chUndistort) {
            /* Grab one frame just to size/clone the undistort buffer. */
            enum PSMove_Bool new_frame;
            cc->frame3chUndistort = cvCloneImage(
                    camera_control_query_frame(cc, NULL, NULL, &new_frame));
        }

        int width, height;
        get_metrics(&width, &height);

        cc->mapx = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
        cc->mapy = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);

        cvInitUndistortMap(intrinsic, distortion, cc->mapx, cc->mapy);

        /* Cache focal lengths from the camera matrix diagonal. */
        cc->focl_x = CV_MAT_ELEM(*intrinsic, float, 0, 0);
        cc->focl_y = CV_MAT_ELEM(*intrinsic, float, 1, 1);

        // TODO: Shouldn't we free intrinsic and distortion here?
    } else {
// Sequence-camera constructor: records the image directory / calibration
// files and probes the first image on disk to learn the stream resolution.
//
// @param img_path   Directory containing the numbered image sequence.
// @param color      Load frames as color (true) or grayscale (false).
// @param imgIdx     Index of the frame BEFORE the first one to read.
// @param intrinsic  Path to the camera-matrix XML file.
// @param distortion Path to the distortion-coefficients XML file.
// @param imgext     Image filename extension (e.g. "png").
CCamera::CCamera(std::string &img_path, bool color, int imgIdx,
                 std::string &intrinsic, std::string &distortion,
                 std::string &imgext)
    : verbose_(false)
    , cam_type_(CAM_SEQ)
{
    // load saved images from disk
    m_strImgPath = img_path;
    color_ = color;
    m_nImgIdx = imgIdx+1;
    intrinsic_ = (CvMat*)cvLoad(intrinsic.c_str());
    distortion_ = (CvMat*)cvLoad(distortion.c_str());
    img_ext_ = imgext;

    // Check the image resolution and save it.  Filenames look like
    // <path>/0001.<ext> (4-digit zero-padded index).
    std::stringstream ss;
    ss << m_strImgPath << "/" << std::setw(4) << std::setfill('0')
       << m_nImgIdx << "." << img_ext_;
    std::cout << ss.str().c_str() << std::endl;
    IplImage* image = cvLoadImage(ss.str().c_str(),
        color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
    if (image) {
        width_ = image->width;
        height_ = image->height;
        // Bug fix: the probe image was never released before, leaking
        // one full frame per constructed camera.
        cvReleaseImage(&image);
    } else {
        // Bug fix: a missing first image used to dereference NULL.
        width_ = 0;
        height_ = 0;
        std::cout << "CCamera: could not load " << ss.str() << std::endl;
    }
    std::cout << "image resolution: (" << width_ << ", " << height_ << ")" << std::endl;

    img_mapx_ = NULL;
    img_mapy_ = NULL;
    img_input_ = NULL;
}
// Component start hook: load the face and eye Haar cascades and allocate
// the shared detection storage.  Returns OPROS_CALLER_ERROR if any of the
// three resources could not be acquired, OPROS_SUCCESS otherwise.
ReturnType IPEL_Haar2FaceEyeDetectionComp::onStart()
{
    // Start from a clean slate, then acquire each resource in turn.
    _cascade_f = 0;
    _cascade_e = 0;
    _storage = 0;

    _cascade_f = (CvHaarClassifierCascade*)cvLoad( cascade_face_name, 0, 0, 0 );
    if( !_cascade_f )
        printf("Can not load %s\n",cascade_face_name);

    _cascade_e = (CvHaarClassifierCascade*)cvLoad( cascade_eye_name, 0, 0, 0 );
    if( !_cascade_e )
        printf("Can not load %s\n",cascade_eye_name);

    _storage = cvCreateMemStorage(0);

    // All three are required before detection may run.
    if( !_cascade_f || !_cascade_e || !_storage )
        return OPROS_CALLER_ERROR;

    PrintMessage ("SUCCESS : IPEL_Haar2FaceEyeDetectionComp::onStart()\n");
    return OPROS_SUCCESS;
}
// Load the left/right/both-eye Haar cascades and allocate the shared
// detection storage.  Returns true only when every resource is ready.
bool vpProcess::init()
{
    haarLeft  = (CvHaarClassifierCascade*)cvLoad(VP_EYEHAARL, 0, 0, 0);
    haarRight = (CvHaarClassifierCascade*)cvLoad(VP_EYEHAARR, 0, 0, 0);
    haarBoth  = (CvHaarClassifierCascade*)cvLoad(VP_EYEHAARB, 0, 0, 0);
    storage = cvCreateMemStorage(0);
    // Robustness: also require the memory storage — the old code ignored
    // an allocation failure here and would crash later during detection.
    return (haarLeft != NULL && haarRight != NULL
            && haarBoth != NULL && storage != NULL);
}
int faceDetector::setCascadeName(char *name) { if( !(cvLoad( name, 0, 0, 0 )) ) return -1; cascadeName = name; cascade = (CvHaarClassifierCascade*)cvLoad( cascadeName, 0, 0, 0 ); return 0; }
bool CalibrationLoader::loadAllCalibrationFiles(){ char temp[1024]; CvMat* tempMat; string calibPath = BEINGTHERE_ROOT_DIR + "resources/multi-cam/calib"; //string calibPath = "C:/Logs/PCL/data/calib"; for(int i = 0; i < NUM_CAM; i++) { sprintf(temp, "%s/cam_rgb_k%s.xml",calibPath.c_str(), kinect[i].c_str()); tempMat = (CvMat*)cvLoad(temp, NULL, NULL, NULL); intrinMat[i] = cv::Mat(tempMat); intrisic[i].fx = intrinMat[i].at<float>(0, 0); intrisic[i].fy = intrinMat[i].at<float>(1, 1); intrisic[i].cx = intrinMat[i].at<float>(0, 2); intrisic[i].cy = intrinMat[i].at<float>(1, 2); //Load Distortion sprintf(temp, "%s/distort_rgb_k%s.xml",calibPath.c_str(), kinect[i].c_str()); tempMat = (CvMat*)cvLoad(temp, NULL, NULL, NULL); distortMat[i] = cv::Mat(tempMat); } //Load Extrinsic Params for(int i=0; i<NUM_CAM; ++i){ if(kinect_parent[i].compare("-1") != 0) { sprintf(temp, "%s/rot_k%s_k%s.xml", calibPath.c_str(), kinect_parent[i].c_str(), kinect[i].c_str()); tempMat = (CvMat*)cvLoad(temp, NULL, NULL, NULL); extrinMatR[i] = cv::Mat(tempMat); //Convert To Mat33 Types for(int x = 0; x < 3; x++){ extrinsicR[i].data[x].x = extrinMatR[i].at<float>(x, 0); extrinsicR[i].data[x].y = extrinMatR[i].at<float>(x, 1); extrinsicR[i].data[x].z = extrinMatR[i].at<float>(x, 2); } //Load Extrinsic T To cv::Mat sprintf(temp, "%s/trans_k%s_k%s.xml",calibPath.c_str(), kinect_parent[i].c_str(), kinect[i].c_str()); tempMat = (CvMat*)cvLoad(temp, NULL, NULL, NULL); extrinMatT[i] = cv::Mat(tempMat); extrinMatT[i] /= 100.f; //Convert cv::Mat to float3 extrinsicT[i].x = extrinMatT[i].at<float>(0); extrinsicT[i].y = extrinMatT[i].at<float>(1); extrinsicT[i].z = extrinMatT[i].at<float>(2); } } return true; }
// Cluster the data set's SURF descriptors with k-means (or reload a cached
// clustering from disk when kmeans_load is set).  On the compute path the
// resulting centers are optionally persisted when kmeans_save is set.
bool Classifier::kmeans(DataSet *data) {
    cout << "------------------------------------------" << endl;
    cout << "\t\tK-Means" << endl;

    if (kmeans_load) {
        cout << "Loading..." << endl;
        centers = (CvMat *)cvLoad("centers.dat");
        data->samples = (CvMat *)cvLoad("samples.dat");
        data->responses = (CvMat *)cvLoad("responses.dat");
        // Bug fix: the old code reported success unconditionally, leaving
        // NULL matrices behind when any cache file was missing.
        if (!centers || !data->samples || !data->responses) {
            cout << "Failed to load cached k-means data" << endl;
            return false;
        }
        data->centers = centers;
        cout << "Loaded Successfully" << endl;
        return true;
    }

    CvMat *desc = data->kmeans_input();
    data->clusters = cvCreateMat(data->num_samples, 1, CV_32SC1);
    centers = cvCreateMat(num_clusters, SURF_SIZE, CV_32FC1);
    data->centers = centers;

    cout << "Running with k = " << num_clusters << endl;
    flush(cout);

    cvKMeans2(
        desc,            // samples
        num_clusters,    // clusters
        data->clusters,  // labels
        cvTermCriteria(
            CV_TERMCRIT_EPS|CV_TERMCRIT_ITER,  // End criteria
            10,          // Max iter
            0.1),        // accuracy
        1,               // attempts
        &rng,            // rng
        0,               // flags
        centers,         // centers
        NULL             // compactness
    );

    if (kmeans_save) {
        cvSave("centers.dat", centers);
        cvSave("samples.dat", data->cluster_samples() );
        cvSave("responses.dat", data->cluster_responses() );
        cout << "Saved!" << endl;
    }

    cvReleaseMat(&desc);
    data->to_kmeans = NULL;
    return true;
}
// Construct a detector: allocate scratch storage and load the Haar cascade,
// falling back to the compiled-in HAARCASCADE path when none is given.
ObjectDetect::ObjectDetect(const char *haarCascadePath)
{
    m_objects = NULL;
    m_image = NULL;
    storage = cvCreateMemStorage(0);

    // NULL path selects the default cascade baked into HAARCASCADE.
    const char *cascadeFile = haarCascadePath ? haarCascadePath : HAARCASCADE;
    m_haarCascade = (CvHaarClassifierCascade *) cvLoad(cascadeFile);
}
void Camera::loadHomography(const char* homographyFile, const char* inverseFile) { if((homography = (CvMat*) cvLoad(homographyFile)) == NULL) { printf("Error loading %s\n", homographyFile); } if((inverse = (CvMat*) cvLoad(inverseFile)) == NULL) { printf("Error loading %s\n", inverseFile); } }
// Constructor: wires up the ROS flows, allocates fixed-size OpenCV image
// buffers, loads any previously saved calibration, and prepares a
// timestamped directory for captured images.
Calibrator() : ROS_Slave()
{
    // Incoming camera images (with codec) and outgoing annotated images.
    register_sink(image_in = new FlowImage("image_in"), ROS_CALLBACK(Calibrator, image_received));
    codec_in = new ImageCodec<FlowImage>(image_in);
    register_source(image_out = new FlowImage("image_out"));
    codec_out = new ImageCodec<FlowImage>(image_out);
    // Pan/tilt/zoom: state in, commands out.
    register_sink(observe = new FlowPTZActuatorNoSub("observe"), ROS_CALLBACK(Calibrator, ptz_received));
    register_source(control = new FlowPTZActuatorNoSub("control"));
    // Keyboard events drive the calibration UI.
    register_sink(key = new FlowSDLKeyEvent("key"), ROS_CALLBACK(Calibrator, key_received));
    register_with_master();

    // Fixed 704x480 BGR buffers; the two headers wrap external data.
    cvimage_in = cvCreateMatHeader(480, 704, CV_8UC3);
    cvimage_out = cvCreateMatHeader(480, 704, CV_8UC3);
    cvimage_bgr = cvCreateMat(480, 704, CV_8UC3);
    cvimage_undistort = cvCreateMat(480, 704, CV_8UC3);

    // Reuse saved calibration when present, otherwise allocate empty
    // 3x3 intrinsics and 4x1 distortion matrices.
    if ((intrinsic_matrix = (CvMat*)cvLoad("intrinsic.dat")) == 0) {
        intrinsic_matrix = cvCreateMat( 3, 3, CV_32FC1 );
    }
    if ((distortion_coeffs = (CvMat*)cvLoad("distortion.dat")) == 0) {
        distortion_coeffs = cvCreateMat( 4, 1, CV_32FC1 );
    }
    matToScreen(intrinsic_matrix, "intrinsic");
    matToScreen(distortion_coeffs, "distortion");

    // UI / capture state flags.
    calibrated = false;
    undistort = false;
    centering = false;
    take_pic = false;
    img_cnt = 0;

    // Image directory named from local time: images/MMDDYY_HHMMSS.
    time_t rawtime;
    struct tm* timeinfo;
    time(&rawtime);
    timeinfo = localtime(&rawtime);
    sprintf(dir_name, "images/%.2d%.2d%.2d_%.2d%.2d%.2d", timeinfo->tm_mon + 1,
        timeinfo->tm_mday,timeinfo->tm_year - 100,timeinfo->tm_hour,
        timeinfo->tm_min, timeinfo->tm_sec);
    if (mkdir(dir_name, 0755)) {
        std::cout << "Failed to make directory: " << dir_name;
    }

    // Scratch space for up to a 12x12 grid of detected chessboard corners.
    last_corners = new CvPoint2D32f[12*12];
}
// Construct the image processor: load the frontal-face Haar classifier
// named by cascade_name and allocate the detection memory storage.
ImgProcessing::ImgProcessing()
    : cascade_name("haarcascade_frontalface.xml")
{
    // Load classifier for face detection.
    cascade = (CvHaarClassifierCascade*)cvLoad(cascade_name, 0, 0, 0 );

    // Allocate the memory storage used by the detector.
    storage = cvCreateMemStorage(0);
}
bool CvImage::load( const char* filename, const char* imgname, int color ) { IplImage* img = 0; if( icvIsXmlOrYaml(filename) ) { img = icvRetrieveImage(cvLoad(filename,0,imgname)); if( (img->nChannels > 1) != (color == 0) ) CV_Error( CV_StsNotImplemented, "RGB<->Grayscale conversion is not implemented for images stored in XML/YAML" ); /*{ IplImage* temp_img = 0; temp_img = cvCreateImage( cvGetSize(img), img->depth, color > 0 ? 3 : 1 )); cvCvtColor( img, temp_img, color > 0 ? CV_GRAY2BGR : CV_BGR2GRAY ); cvReleaseImage( &img ); img = temp_img; }*/ } #ifdef HAVE_OPENCV_HIGHGUI else img = cvLoadImage( filename, color ); #endif attach( img ); return img != 0; }
bool CvMatrix::load( const char* filename, const char* matname, int color ) { CvMat* m = 0; if( icvIsXmlOrYaml(filename) ) { m = icvRetrieveMatrix(cvLoad(filename,0,matname)); if( (CV_MAT_CN(m->type) > 1) != (color == 0) ) CV_Error( CV_StsNotImplemented, "RGB<->Grayscale conversion is not implemented for matrices stored in XML/YAML" ); /*{ CvMat* temp_mat; temp_mat = cvCreateMat( m->rows, m->cols, CV_MAKETYPE(CV_MAT_DEPTH(m->type), color > 0 ? 3 : 1 ))); cvCvtColor( m, temp_mat, color > 0 ? CV_GRAY2BGR : CV_BGR2GRAY ); cvReleaseMat( &m ); m = temp_mat; }*/ } #ifdef HAVE_OPENCV_HIGHGUI else m = cvLoadImageM( filename, color ); #endif set( m, false ); return m != 0; }
//Cascade_Name:load Harrlike feature of face,ears,eyes,etc //Do_pyramids:smallize origin image to accerlate excuting harrlike Harrlike::Harrlike(IplImage *img,Coordinate *adjustedCoord, String Cascade_Name,int sizeX,int sizeY,int Do_pyramids,int color) { m_srcImage = cvCloneImage(img); m_OriginalImage = cvCreateImage(cvGetSize(m_srcImage),8,1); m_normalizeImage = cvCreateImage(cvSize(sizeX,sizeY),8,1); cvCopy(m_srcImage,m_OriginalImage); cascade = (CvHaarClassifierCascade*)cvLoad( Cascade_Name.c_str() ); m_PyrDown = Do_pyramids; m_Color = color; m_justSet = true; scale = true; m_do = false; m_width = sizeX; m_height = sizeY; for(int i = 0;i < 12;i++) { m_TraningCoord[i] = adjustedCoord[i]; } if(!(CvHaarClassifierCascade*) cascade) { cout<<"ERROR: Could not load classifier cascade."<<endl; m_justSet = 0; } }
bool InitAsm() { if(! g_AsmFit.Read(ASMFN)) { CString msg; msg.Format("Can't load ASM model file %s.", ASMFN); //::MessageBox(NULL, msg, "message", MB_OK | MB_ICONWARNING); MessageBox1(msg); return false; } g_FDcascade = (CvHaarClassifierCascade*)cvLoad(FDFN, 0, 0, 0); if (!g_FDcascade) { CString msg; msg.Format("Can't load %s.", _T(FDFN)); ::MessageBox1(msg); return false; } g_FDstorage = cvCreateMemStorage(0); if (!g_FDstorage) { ::MessageBox1("Can't create memory storage for face detection."); return false; } return true; }
/***************************************************************************
 * Function
 *     MatchPicture
 * Parameters
 *     CString imgPath - path of the image to scan
 *     CString xmlPath - path of the cascade (template) file
 * Returns
 *     CPoint - center of the matched region, or (0,0) when nothing is
 *              detected or an input fails to load
 * Notes
 *     Runs Haar-cascade region detection on the image.
 ***************************************************************************/
CPoint CMyImage::MatchPicture(CString imgPath,CString xmlPath)
{
    CPoint centerPoint;
    CvHaarClassifierCascade* cascade = 0;

    cascade = (CvHaarClassifierCascade*)cvLoad(xmlPath);  // load the cascade file
    if( !cascade )
    {
        return CPoint(0,0);
    }

    IplImage* image = cvLoadImage( imgPath,1);  // load the image to scan
    if(image)
    {
        centerPoint = Detect_and_Draw(image,cascade,0);  // run detection
        cvWaitKey(0);
        cvReleaseImage(&image);
    }
    else
    {
        // Bug fix: the old code returned here without releasing the
        // cascade, leaking it on every call with a bad image path.
        cvReleaseHaarClassifierCascade( &cascade );
        return CPoint(0,0);
    }

    cvReleaseHaarClassifierCascade( &cascade);  // release
    return centerPoint;
}
// Run a Haar cascade over an image file and return the detected face
// rectangles.  Returns an empty list when the cascade, the image, or the
// storage cannot be created.
vector<RECT> facedetect (string imagefilename, string classifierfilename)
{
    CvHaarClassifierCascade* cascade =
        (CvHaarClassifierCascade*)cvLoad(classifierfilename.c_str());
    // Bug fix: cvLoadImage takes const char*, not std::string.
    IplImage* image = cvLoadImage(imagefilename.c_str());
    CvMemStorage* storage = cvCreateMemStorage ( );
    vector<RECT> list;

    if( cascade && image && storage )
    {
        // Bug fix: the old code referenced an undeclared `img` here.
        CvSeq* faces = cvHaarDetectObjects( image, cascade, storage, 1.1, 2,
            CV_HAAR_DO_CANNY_PRUNING, cvSize(40, 40) );
        for( int i = 0; i < (faces ? faces->total : 0); i++ )
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
            RECT rect ;
            rect.left = r->x;
            rect.right = r->x + r->width;
            rect.top = r->y;
            rect.bottom = r->y + r->height;
            list.push_back(rect);
        }
    }

    if (storage) cvReleaseMemStorage(&storage);
    if (image) cvReleaseImage(&image);
    if (cascade) cvReleaseHaarClassifierCascade(&cascade);

    return list;  // bug fix: the old bare `return` dropped the result
}
int main(int argc, char **argv) { CvCapture *capture; IplImage *frame; int key; char *filename = "usb.xml"; // Change to the name of classifier cascade = (CvHaarClassifierCascade *) cvLoad(filename, 0, 0, 0); storage = cvCreateMemStorage(0); capture = cvCaptureFromCAM(0); // Check // assert(cascade && storage && capture); cvNamedWindow("video", 1); while(1) { frame = cvQueryFrame(capture); detect(frame); key = cvWaitKey(50); } cvReleaseImage(&frame); cvReleaseCapture(&capture); cvDestroyWindow("video"); cvReleaseHaarClassifierCascade(&cascade); cvReleaseMemStorage(&storage); return 0; }
bool CvMatrix::load( const char* filename, const char* matname, int color ) { CvMat* m = 0; if( icvIsXmlOrYaml(filename) ) { m = icvRetrieveMatrix(cvLoad(filename,0,matname)); if( (CV_MAT_CN(m->type) > 1) != (color == 0) ) CV_Error( CV_StsNotImplemented, "RGB<->Grayscale conversion is not implemented for matrices stored in XML/YAML" ); /*{ CvMat* temp_mat; temp_mat = cvCreateMat( m->rows, m->cols, CV_MAKETYPE(CV_MAT_DEPTH(m->type), color > 0 ? 3 : 1 ))); cvCvtColor( m, temp_mat, color > 0 ? CV_GRAY2BGR : CV_BGR2GRAY ); cvReleaseMat( &m ); m = temp_mat; }*/ } else { if( load_image_m ) m = load_image_m( filename, color ); else CV_Error( CV_StsNotImplemented, "Loading an image stored in such a format requires HigGUI.\n" "Link it to your program and call any function from it\n" ); } set( m, false ); return m != 0; }
void addFace(char *filename,char *personName) { IplImage* pInpImg=0; CvHaarClassifierCascade* pCascade=0; //指向后面从文件中获取的分类器 CvMemStorage* pStorage=0; //存储检测到的人脸数据 CvSeq* pFaceRectSeq; pStorage=cvCreateMemStorage(0); //创建默认大先64k的动态内存区域 pCascade=(CvHaarClassifierCascade*)cvLoad("d:/tools/opencv/data/haarcascades/haarcascade_frontalface_alt2.xml"); //加载分类器 cvLoadImage(filename); if (!pInpImg||!pStorage||!pCascade) { printf("initialization failed:%s\n",(!pInpImg)?"can't load image file":(!pCascade)?"can't load haar-cascade---make sure path is correct":"unable to allocate memory for data storage",argv[1]); return ; } //人脸检测 pFaceRectSeq=cvHaarDetectObjects(pInpImg,pCascade,pStorage, 1.2,2,CV_HAAR_DO_CANNY_PRUNING,cvSize(40,40)); //将检测到的人脸以矩形框标出。 IplImage *reuslt; displaydetection(pInpImg,pFaceRectSeq,filename,reuslt); cvReleaseImage(&pInpImg); cvReleaseHaarClassifierCascade(&pCascade); cvReleaseMemStorage(&pStorage); }
// Replace the current Haar cascade with one loaded from cascade_location,
// releasing the previous cascade first.
void reinit_detection(char* cascade_location)
{
    if (cascade)
        cvReleaseHaarClassifierCascade(&cascade);

    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_location, 0, 0, 0 );
}
/*!
    \fn CvGaborFeature::_FERETBin_F(const CvFeret* feret, int possub, const CvMat *index) const

    Build a two-class training set for this Gabor feature from FERET data:
    samples belonging to subject `possub` (per `index`) get class 1, all
    others class 2.  Caller owns the returned CvTrainingData.
 */
CvTrainingData* CvGaborFeature::_FERETBin_F(const CvFeret* feret, int possub, const CvMat *index) const
{
    int nsamples = feret->getNum();

    /* Generate filename.  Bug fix: the old code sprintf'd into a 50-byte
       heap buffer, overflowing for any non-trivial main path; use a
       bounded stack buffer instead. */
    string path = feret->getMainpath();
    char filename[512];
    snprintf(filename, sizeof(filename), "%s/%d/%d/%d_%d.xml",
             path.c_str(), iNu, iMu, ix, iy);

    CvMat* mat = (CvMat*)cvLoad( filename, NULL, NULL, NULL );
    //cvTranspose( mat, mat );

    CvTrainingData *bindata = new CvTrainingData;
    bindata->init(2, nsamples, 1);
    // NOTE(review): `mat` is never released; if setdata copies the data
    // this is a leak — confirm setdata's ownership contract.
    bindata->setdata( mat );

    for(int i = 0; i < nsamples; i++)
    {
        int id = (int)cvGetReal1D(index, i);
        // Target subject -> class 1, everything else -> class 2.
        if(id == possub)
            bindata->setclsidxofsample( 1, i);
        else
            bindata->setclsidxofsample( 2, i);
    }
    bindata->statclsdist();
    return bindata;
}
int main( int argc, char** argv ) { cascade_name = "d:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml"; cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 ); if( !cascade ) { fprintf( stderr, "ERROR: Could not load classifier cascade\n" ); return -1; } storage = cvCreateMemStorage(0); cvNamedWindow( "result", 1 ); const char* filename = "d:\\picture\\1112.jpg"; IplImage* image = cvLoadImage( filename, 1 ); if( image ) { detect_and_draw( image ); cvWaitKey(0); cvReleaseImage( &image ); } cvDestroyWindow("result"); return 0; }
/* Interpreter command: create a named OpenCV classifier-cascade object and
 * load its Haar classifier data from a specification file. */
static COMMAND_FUNC( do_new_cascade )
{
    const char * s;
    const char *cascade_name;
    OpenCV_Cascade *casc_p;

    /* Prompt for the object name and the classifier file path. */
    s=NAMEOF("classifier cascade");
    cascade_name = NAMEOF("classifier specification file");

    /* Refuse to clobber an existing cascade with the same name. */
    casc_p = ocv_ccasc_of(QSP_ARG  s);
    if( casc_p != NO_CASCADE ){
        sprintf(ERROR_STRING,"Classifier cascade %s already exists!?",s);
        WARN(ERROR_STRING);
        return;
    }

    casc_p = new_ocv_ccasc(QSP_ARG  s);
    if( casc_p == NO_CASCADE ){
        sprintf(ERROR_STRING,"Error creating classifier cascade %s",s);
        WARN(ERROR_STRING);
        return;
    }

    /* Load the Haar cascade itself into the new object. */
    casc_p->ocv_cascade = (CvHaarClassifierCascade*)cvLoad(cascade_name, 0,0,0);
    if(casc_p->ocv_cascade == NULL) {
        sprintf(ERROR_STRING,"Error loading cascade from file %s",cascade_name);
        WARN(ERROR_STRING);
        return;
        /* BUG release struct here */
    }
}
bool CvImage::load( const char* filename, const char* imgname, int color ) { IplImage* img = 0; if( icvIsXmlOrYaml(filename) ) { img = icvRetrieveImage(cvLoad(filename,0,imgname)); if( (img->nChannels > 1) != (color == 0) ) CV_Error( CV_StsNotImplemented, "RGB<->Grayscale conversion is not implemented for images stored in XML/YAML" ); /*{ IplImage* temp_img = 0; temp_img = cvCreateImage( cvGetSize(img), img->depth, color > 0 ? 3 : 1 )); cvCvtColor( img, temp_img, color > 0 ? CV_GRAY2BGR : CV_BGR2GRAY ); cvReleaseImage( &img ); img = temp_img; }*/ } else { if( load_image ) img = load_image( filename, color ); else CV_Error( CV_StsNotImplemented, "Loading an image stored in such a format requires HigGUI.\n" "Link it to your program and call any function from it\n" ); } attach( img ); return img != 0; }
void detectGandhiFace() { char *filename = ".\\data\\haarcascades\\haarcascade_frontalface_alt.xml"; IplImage *frame; /* load the classifier note that I put the file in the same directory with this code */ cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 ); /* setup memory buffer; needed by the face detector */ storage = cvCreateMemStorage( 0 ); /* initialize camera */ assert( cascade && storage); frame=NotetplProcessed; /* always check */ /* 'fix' frame */ //cvFlip( frame, frame, -1 ); frame->origin = 0; /* detect faces and display video */ // ---------- NOW THE FACES DETECT ARE SENT FOR A TEMPLATE MATCH WITH FACE OF GANDHIJI USING detectallfaces FUNCTION-------------- detectAllFaces( frame ); //printf("Global Maximum of %d = %f ",no,globalmaximum); }