/**
 * Classifies one example with a trained SVM model.
 *
 * Fast path: for a linear-kernel model whose explicit weight vector has
 * already been computed (model->lin_weights non-null, typically via
 * add_weight_vector_to_linear_model()), delegates to
 * classify_example_linear(), which is a single dot product.
 *
 * General path: evaluates the kernel expansion
 *   sum_{i=1..sv_num-1} alpha_i * K(sv_i, ex)  -  b
 * over all support vectors.  Support vectors are stored starting at
 * index 1, hence the loop bounds.
 *
 * Fix: dropped the `register` storage class specifiers — deprecated since
 * C++11 and removed in C++17, where they are a hard compile error; modern
 * compilers ignore the hint anyway.
 *
 * @param model  trained model (kernel parameters, support vectors, alphas, bias)
 * @param ex     example/document to classify
 * @return       signed distance of ex from the decision hyperplane
 */
double classify_example(MODEL *model, DOC *ex)
{
  long i;
  double dist;

  /* Fast path: linear kernel with precomputed weight vector. */
  if((model->kernel_parm.kernel_type == LINEAR) && (model->lin_weights))
    return(classify_example_linear(model,ex));

  /* General path: kernel expansion over support vectors (index 1-based). */
  dist=0;
  for(i=1;i<model->sv_num;i++) {
    dist+=kernel(&model->kernel_parm,model->supvec[i],ex)*model->alpha[i];
  }
  return(dist-model->b);
}
/**
 * GMUM.R adaptation of SVM Light's svm_classify main().
 *
 * Classifies every test example either from files named on the command line
 * (classic mode, use_gmumr == false) or directly from the in-memory
 * SVMConfiguration (use_gmumr == true).  In GMUM.R mode each example is
 * serialized to SVM Light's text line format so the stock parse_document()
 * can be reused, file output is suppressed (pred_format = -1), and every
 * decision value is stored in config.result.
 *
 * @param argc,argv   classic SVM Light command line (still parsed by
 *                    librarySVMClassifyReadInputParameters in both modes)
 * @param use_gmumr   true: take input from config, write results to config
 * @param config      in/out: data source and per-example decision values
 * @return 0 on success; file-open failures EXIT(1) from within
 */
int SVMLightRunner::librarySVMClassifyMain(
    int argc, char **argv, bool use_gmumr, SVMConfiguration &config
) {
    LOG(
        config.log,
        LogLevel::DEBUG_LEVEL,
        __debug_prefix__ + ".librarySVMClassifyMain() Started."
    );
    DOC *doc;   /* test example */
    WORD *words;
    long max_docs,max_words_doc,lld;
    long totdoc=0,queryid,slackid;
    long correct=0,incorrect=0,no_accuracy=0;
    long res_a=0,res_b=0,res_c=0,res_d=0,wnum,pred_format;
    long j;
    double t1,runtime=0;
    double dist,doc_label,costfactor;
    char *line,*comment;
    FILE *predfl,*docfl;
    MODEL *model;

    // GMUM.R changes {
    librarySVMClassifyReadInputParameters(
        argc, argv, docfile, modelfile, predictionsfile, &verbosity,
        &pred_format, use_gmumr, config);

    if (!use_gmumr) {
        /* Classic mode: size the line/word buffers from the input file. */
        nol_ll(docfile,&max_docs,&max_words_doc,&lld); /* scan size of input file */
        lld+=2;
        line = (char *)my_malloc(sizeof(char)*lld);
    } else {
        /* GMUM.R mode: size buffers from the in-memory data set. */
        max_docs = config.target.n_rows;
        max_words_doc = config.getDataDim();
        config.result = arma::zeros<arma::vec>(max_docs);
        // Prevent writing to the file
        pred_format = -1;
        // lld used only for file reading
    }
    max_words_doc+=2;
    words = (WORD *)my_malloc(sizeof(WORD)*(max_words_doc+10));
    // GMUM.R changes }
    model=libraryReadModel(modelfile, use_gmumr, config);
    // GMUM.R changes }

    if(model->kernel_parm.kernel_type == 0) { /* linear kernel */
        /* compute weight vector */
        add_weight_vector_to_linear_model(model);
    }

    if(verbosity>=2) {
        C_PRINTF("Classifying test examples..");
        C_FFLUSH(stdout);
    }

    // GMUM.R changes {
    bool newline;
    if (!use_gmumr) {
        /* Classic mode: open output then input, prime the first line. */
        if ((predfl = fopen (predictionsfile, "w")) == NULL) {
            perror (predictionsfile);
            EXIT (1);
        }
        if ((docfl = fopen (docfile, "r")) == NULL) {
            perror (docfile);
            EXIT (1);
        }
        newline = (!feof(docfl)) && fgets(line,(int)lld,docfl);
    } else {
        /* GMUM.R mode: serialize example #totdoc into SVM Light text format
           so parse_document() below can be reused unchanged.
           NOTE(review): `line` is allocated with new[] on every iteration and
           never delete[]d (classic mode pairs my_malloc with free) — this
           looks like one leaked buffer per example; confirm and fix upstream. */
        newline = false;
        if (totdoc < config.getDataExamplesNumber()) {
            newline = true;
            std::string str = SVMConfigurationToSVMLightLearnInputLine(config, totdoc);
            line = new char[str.size() + 1];
            std::copy(str.begin(), str.end(), line);
            line[str.size()] = '\0';
        }
    }
    /* Main loop: `newline` means "a line is ready in `line`"; the next line
       is fetched at the BOTTOM of the loop body. */
    while(newline) {
        if (use_gmumr) {
            std::string stringline = "";
        }
        // GMUM.R changes }
        /* NOTE(review): `continue` here skips the read-next-line block at the
           bottom of this while(newline) loop, so a '#' comment line would
           spin forever in file mode — verify against upstream svm_classify.c,
           where the fgets() lives in the while condition instead. */
        if(line[0] == '#') continue;  /* line contains comments */
        parse_document(line,words,&doc_label,&queryid,&slackid,&costfactor,&wnum,
                       max_words_doc,&comment);
        totdoc++;
        if(model->kernel_parm.kernel_type == 0) {   /* linear kernel */
            for(j=0;(words[j]).wnum != 0;j++) {     /* Check if feature numbers */
                if((words[j]).wnum>model->totwords) /* are not larger than in   */
                    (words[j]).wnum=0;              /* model. Remove feature if */
            }                                       /* necessary.               */
            doc = create_example(-1,0,0,0.0,create_svector(words,comment,1.0));
            t1=get_runtime();
            dist=classify_example_linear(model,doc);
            runtime+=(get_runtime()-t1);
            free_example(doc,1);
        }
        else {                                      /* non-linear kernel */
            doc = create_example(-1,0,0,0.0,create_svector(words,comment,1.0));
            t1=get_runtime();
            dist=classify_example(model,doc);
            runtime+=(get_runtime()-t1);
            free_example(doc,1);
        }

        /* Tally confusion-matrix counts; res_a..res_d feed precision/recall. */
        if(dist>0) {
            if(pred_format==0) { /* old weired output format */
                C_FPRINTF(predfl,"%.8g:+1 %.8g:-1\n",dist,-dist);
            }
            if(doc_label>0) correct++; else incorrect++;
            if(doc_label>0) res_a++; else res_b++;
        }
        else {
            if(pred_format==0) { /* old weired output format */
                C_FPRINTF(predfl,"%.8g:-1 %.8g:+1\n",-dist,dist);
            }
            if(doc_label<0) correct++; else incorrect++;
            if(doc_label>0) res_c++; else res_d++;
        }
        if(pred_format==1) { /* output the value of decision function */
            C_FPRINTF(predfl,"%.8g\n",dist);
        }
        /* Labels other than +/-1 disable the accuracy report below. */
        if((int)(0.01+(doc_label*doc_label)) != 1) {
            no_accuracy=1;
        } /* test data is not binary labeled */
        if(verbosity>=2) {
            if(totdoc % 100 == 0) {
                C_PRINTF("%ld..",totdoc); C_FFLUSH(stdout);
            }
        }

        // GMUM.R changes {
        if (!use_gmumr) {
            /* Classic mode: fetch the next document line for the next pass. */
            newline = (!feof(docfl)) && fgets(line,(int)lld,docfl);
        } else {
            newline = false;
            // Store prediction result in config
            config.result[totdoc-1] = dist;
            // Read next line
            if (totdoc < config.getDataExamplesNumber()) {
                newline = true;
                std::string str = SVMConfigurationToSVMLightLearnInputLine(config, totdoc);
                line = new char[str.size() + 1];
                std::copy(str.begin(), str.end(), line);
                line[str.size()] = '\0';
            }
        }
    }
    if (!use_gmumr) {
        fclose(predfl);
        fclose(docfl);
        free(line);
    }
    // GMUM.R changes }

    free(words);
    free_model(model,1);

    if(verbosity>=2) {
        C_PRINTF("done\n");
        /* Note by Gary Boone Date: 29 April 2000 */
        /* o Timing is inaccurate. The timer has 0.01 second resolution. */
        /* Because classification of a single vector takes less than */
        /* 0.01 secs, the timer was underflowing. */
        C_PRINTF("Runtime (without IO) in cpu-seconds: %.2f\n",
                 (float)(runtime/100.0));
    }
    if((!no_accuracy) && (verbosity>=1)) {
        C_PRINTF("Accuracy on test set: %.2f%% (%ld correct, %ld incorrect, %ld total)\n",(float)(correct)*100.0/totdoc,correct,incorrect,totdoc);
        C_PRINTF("Precision/recall on test set: %.2f%%/%.2f%%\n",(float)(res_a)*100.0/(res_a+res_b),(float)(res_a)*100.0/(res_a+res_c));
    }

    return(0);
}
int main_classify (int argc, char* argv[]) { DOC *doc; /* test example */ WORDSVM *words; long max_docs,max_words_doc,lld; long totdoc=0,queryid,slackid; long correct=0,incorrect=0,no_accuracy=0; long res_a=0,res_b=0,res_c=0,res_d=0,wnum,pred_format; long j; double t1,runtime=0; double dist,doc_label,costfactor; char *line,*comment; FILE *predfl,*docfl; MODEL *model; read_input_parameters(argc,argv,docfile,modelfile,predictionsfile, &verbosity,&pred_format); nol_ll(docfile,&max_docs,&max_words_doc,&lld); /* scan size of input file */ max_words_doc+=2; lld+=2; line = (char *)my_malloc(sizeof(char)*lld); words = (WORDSVM *)my_malloc(sizeof(WORDSVM)*(max_words_doc+10)); model=read_model(modelfile); if(model->kernel_parm.kernel_type == 0) { /* linear kernel */ /* compute weight vector */ add_weight_vector_to_linear_model(model); } if(verbosity>=2) { printf("Classifying test examples.."); fflush(stdout); } if ((docfl = fopen (docfile, "r")) == NULL) { perror (docfile); exit (1); } if ((predfl = fopen (predictionsfile, "w")) == NULL) { perror (predictionsfile); exit (1); } while((!feof(docfl)) && fgets(line,(int)lld,docfl)) { if(line[0] == '#') continue; /* line contains comments */ parse_document(line,words,&doc_label,&queryid,&slackid,&costfactor,&wnum, max_words_doc,&comment); totdoc++; if(model->kernel_parm.kernel_type == 0) { /* linear kernel */ for(j=0;(words[j]).wnum != 0;j++) { /* Check if feature numbers */ if((words[j]).wnum>model->totwords) /* are not larger than in */ (words[j]).wnum=0; /* model. Remove feature if */ } /* necessary. 
*/ doc = create_example(-1,0,0,0.0,create_svector(words,comment,1.0)); t1=get_runtime(); dist=classify_example_linear(model,doc); runtime+=(get_runtime()-t1); free_example(doc,1); } else { /* non-linear kernel */ doc = create_example(-1,0,0,0.0,create_svector(words,comment,1.0)); t1=get_runtime(); dist=classify_example(model,doc); runtime+=(get_runtime()-t1); free_example(doc,1); } if(dist>0) { if(pred_format==0) { /* old weired output format */ fprintf(predfl,"%.8g:+1 %.8g:-1\n",dist,-dist); } if(doc_label>0) correct++; else incorrect++; if(doc_label>0) res_a++; else res_b++; } else { if(pred_format==0) { /* old weired output format */ fprintf(predfl,"%.8g:-1 %.8g:+1\n",-dist,dist); } if(doc_label<0) correct++; else incorrect++; if(doc_label>0) res_c++; else res_d++; } if(pred_format==1) { /* output the value of decision function */ fprintf(predfl,"%.8g\n",dist); } if((int)(0.01+(doc_label*doc_label)) != 1) { no_accuracy=1; } /* test data is not binary labeled */ if(verbosity>=2) { if(totdoc % 100 == 0) { printf("%ld..",totdoc); fflush(stdout); } } } free(line); free(words); free_model(model,1); if(verbosity>=2) { printf("done\n"); /* Note by Gary Boone Date: 29 April 2000 */ /* o Timing is inaccurate. The timer has 0.01 second resolution. */ /* Because classification of a single vector takes less than */ /* 0.01 secs, the timer was underflowing. */ printf("Runtime (without IO) in cpu-seconds: %.2f\n", (float)(runtime/100.0)); } if((!no_accuracy) && (verbosity>=1)) { printf("Accuracy on test set: %.2f%% (%ld correct, %ld incorrect, %ld total)\n",(float)(correct)*100.0/totdoc,correct,incorrect,totdoc); printf("Precision/recall on test set: %.2f%%/%.2f%%\n",(float)(res_a)*100.0/(res_a+res_b),(float)(res_a)*100.0/(res_a+res_c)); } return(0); }
/**
 * Locates the eyes and nose in a frame, extracts the feature vector, and
 * classifies it against every per-zone SVM model.
 *
 * Pipeline: (1) locate left/right eyes in parallel on a shared preprocessed
 * image, (2) estimate a nose search center from the eye locations, (3) locate
 * the nose, (4) run the feature extractors over the annotated frame,
 * normalize, and (5) score the feature vector with each zone's SVM model.
 *
 * @param frame       input video frame
 * @param confidence  out: decision value of the best-scoring zone model
 * @param fa          in/out: frame annotation; face center is read (or
 *                    defaulted to the image center), iris and nose
 *                    locations are written back
 * @return 1-based index of the zone whose model scored highest
 * @throws string     if any of the three location extractors is null
 */
int Classifier::getZone(IplImage* frame, double& confidence, FrameAnnotation& fa) {
  if (!leftEye || !rightEye || !nose) {
    string err = "Classifier::getZone. Location extractors malformed.";
    throw (err);
  }

  // the roi offset
  CvPoint offset;

  // LOIs
  CvPoint leftEyeLocation;
  CvPoint rightEyeLocation;
  CvPoint noseLocation;

  // computing the confidence of the location identification
  double leftPSR;
  double rightPSR;
  double nosePSR;

  // Fall back to the image center when no face annotation is present.
  CvPoint center = fa.getLOI(Annotations::Face);
  if (!center.x || !center.y) {
    center.x = Globals::imgWidth / 2;
    center.y = Globals::imgHeight / 2;
    fa.setFace(center);
  }

  // roiFunction is optional; when set it crops a face ROI and reports the
  // crop's offset so detections can be mapped back to frame coordinates.
  offset.x = offset.y = 0;
  IplImage* roi = (roiFunction)? roiFunction(frame, fa, offset, Annotations::Face) : 0;

  // all location extractors do identical preprocessing. Therefore, preprocess
  // once using say the left eye extractor and re-use it for all three extractors
  fftw_complex* preprocessedImage = leftEye->getPreprocessedImage((roi)? roi : frame);

  // Eye searches are independent; run them as two parallel sections.
  // Each section writes only its own location/PSR variables.
  #pragma omp parallel sections num_threads(2)
  {
    #pragma omp section
    {
      // get the location of the left eye
      leftEye->setImage(preprocessedImage);
      leftEye->apply();
      leftEye->getMaxLocation(leftEyeLocation, leftPSR);
      leftEyeLocation.x += offset.x;
      leftEyeLocation.y += offset.y;
    }

    #pragma omp section
    {
      // get the location of the right eye
      rightEye->setImage(preprocessedImage);
      rightEye->apply();
      rightEye->getMaxLocation(rightEyeLocation, rightPSR);
      rightEyeLocation.x += offset.x;
      rightEyeLocation.y += offset.y;
    }
  }

  if (roi)
    cvReleaseImage(&roi);

  // Seed the nose search: midway between the eyes horizontally, a fixed
  // drop below the eye line vertically (noseDrop presumably calibrated
  // elsewhere — see Globals).
  center.x = (leftEyeLocation.x + rightEyeLocation.x) / 2;
  center.y = leftEyeLocation.y + Globals::noseDrop;
  fa.setNose(center);

  offset.x = offset.y = 0;
  roi = (roiFunction)? roiFunction(frame, fa, offset, Annotations::Nose) : 0;

  // free the preprocessed image
  fftw_free(preprocessedImage);

  // all location extractors do identical preprocessing. Therefore, preprocess
  // once using say the left eye extractor and re-use it for all three extractors
  preprocessedImage = nose->getPreprocessedImage((roi)? roi : frame);

  // get the location of the nose
  nose->setImage(preprocessedImage);
  nose->apply();
  nose->getMaxLocation(noseLocation, nosePSR);
  noseLocation.x += offset.x;
  noseLocation.y += offset.y;

  // free the preprocessed image
  fftw_free(preprocessedImage);

  // Publish the final detections (overwrites the provisional nose center).
  fa.setLeftIris(leftEyeLocation);
  fa.setRightIris(rightEyeLocation);
  fa.setNose(noseLocation);

  // we are done with the images at this point. Free roi if not zero
  if (roi)
    cvReleaseImage(&roi);

  //  cout << "Confidence (L, R, N) = (" << leftPSR << ", " <<
  //    rightPSR << ")" << endl;

  // extract features vector
  vector<double> data;
  for (int i = 0; i < nFeatures; i++) {
    double value = featureExtractors[i]->extract(&fa);
    data.push_back(value);
  }

  // normalize
  normalize(data);

  // create SVM Light objects to classify
  DOC* doc;
  WORD* words = (WORD*)malloc(sizeof(WORD) * (nFeatures + 1));
  for (int i = 0; i < nFeatures; i++) {
    words[i].wnum = featureExtractors[i]->getId();
    words[i].weight = data[i];
  }

  // SVM Light expects that the features vector has a zero element
  // to indicate termination and hence
  words[nFeatures].wnum = 0;
  words[nFeatures].weight = 0.0;

  // create doc
  string comment = "Gaze SVM";
  doc = create_example(-1, 0, 0, 0.0, create_svector(words, (char*)comment.c_str(), 1.0));

  int maxIndex = 0;
  confidence = -FLT_MAX;
  double dists[Globals::numZones];

  // classify using each zone model; iterations are independent, each
  // writes a distinct dists[i] slot.
  #pragma omp parallel for num_threads(Globals::numZones)
  for (unsigned int i = 0; i < Globals::numZones; i++) {
    if (kernelType == Trainer::Linear)
      dists[i] = classify_example_linear(models[i], doc);
    else
      dists[i] = classify_example(models[i], doc);
  }

  // Pick the zone with the largest decision value (1-based result).
  for (unsigned int i = 0; i < Globals::numZones; i++) {
    if (confidence < dists[i]) {
      confidence = dists[i];
      maxIndex = i + 1;
    }
  }

  free_example(doc, 1);
  free(words);

  return maxIndex;
}