int _svm_struct_learn (int argc, char* argv[]) { SAMPLE sample; /* training sample */ LEARN_PARM learn_parm; KERNEL_PARM kernel_parm; STRUCT_LEARN_PARM struct_parm; STRUCTMODEL structmodel; int alg_type; svm_struct_learn_api_init(argc,argv); _svm_struct_learn_read_input_parameters(argc,argv,trainfile,modelfile,&verbosity, &struct_verbosity,&struct_parm,&learn_parm, &kernel_parm,&alg_type); if(struct_verbosity>=1) { printf("Reading training examples..."); fflush(stdout); } /* read the training examples */ sample=read_struct_examples(trainfile,&struct_parm); if(struct_verbosity>=1) { printf("done\n"); fflush(stdout); } /* Do the learning and return structmodel. */ if(alg_type == 0) svm_learn_struct(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,NSLACK_ALG); else if(alg_type == 1) svm_learn_struct(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,NSLACK_SHRINK_ALG); else if(alg_type == 2) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,ONESLACK_PRIMAL_ALG); else if(alg_type == 3) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,ONESLACK_DUAL_ALG); else if(alg_type == 4) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,ONESLACK_DUAL_CACHE_ALG); else if(alg_type == 9) svm_learn_struct_joint_custom(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel); else exit(1); /* Warning: The model contains references to the original data 'docs'. If you want to free the original data, and only keep the model, you have to make a deep copy of 'model'. */ if(struct_verbosity>=1) { printf("Writing learned model...");fflush(stdout); } write_struct_model(modelfile,&structmodel,&struct_parm); if(struct_verbosity>=1) { printf("done\n");fflush(stdout); } free_struct_sample(sample); free_struct_model(structmodel); svm_struct_learn_api_exit(); return 0; }
int _svm_struct_learn (int argc, char* argv[]) { char trainfile[200]; /* file with training examples */ char modelfile[200]; /* file for resulting classifier */ SAMPLE sample; /* training sample */ LEARN_PARM learn_parm; KERNEL_PARM kernel_parm; STRUCT_LEARN_PARM struct_parm; STRUCTMODEL structmodel; int alg_type; HIDEO_ENV *hideo_env=create_env(); svm_struct_learn_api_init(argc,argv); svm_struct_main_read_input_parameters(argc,argv,trainfile,modelfile,&verbosity, &struct_verbosity,&struct_parm,&learn_parm, &kernel_parm,&alg_type); if(struct_verbosity>=1) { printf("Reading training examples..."); fflush(stdout); } /* read the training examples */ sample=read_struct_examples(trainfile,&struct_parm); if(struct_verbosity>=1) { printf("done\n"); fflush(stdout); } /* Do the learning and return structmodel. */ if(alg_type == 1) svm_learn_struct(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,hideo_env); else if(alg_type == 2) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,PRIMAL_ALG,hideo_env); else if(alg_type == 3) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,DUAL_ALG,hideo_env); else if(alg_type == 4) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,DUAL_CACHE_ALG,hideo_env); else exit(1); /* Warning: The model contains references to the original data 'docs'. If you want to free the original data, and only keep the model, you have to make a deep copy of 'model'. */ if(struct_verbosity>=1) { printf("Writing learned model...");fflush(stdout); } write_struct_model(modelfile,&structmodel,&struct_parm); if(struct_verbosity>=1) { printf("done\n");fflush(stdout); } free_struct_sample(sample); free_struct_model(structmodel); svm_struct_learn_api_exit(); return 0; }
int main (int argc, char* argv[]) { SAMPLE sample; /* training sample */ LEARN_PARM learn_parm; KERNEL_PARM kernel_parm; STRUCT_LEARN_PARM struct_parm; STRUCTMODEL structmodel; int alg_type; /* Allow the API to perform whatever initialization is required. */ api_initialize(argv[0]); read_input_parameters(argc,argv,trainfile,modelfile,&verbosity, &struct_verbosity,&struct_parm,&learn_parm, &kernel_parm,&alg_type); if(struct_verbosity>=1) { printf("Reading training examples..."); fflush(stdout); } /* read the training examples */ sample=read_struct_examples(trainfile,&struct_parm); if(struct_verbosity>=1) { printf("done\n"); fflush(stdout); } /* Do the learning and return structmodel. */ if(alg_type == 1) svm_learn_struct(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel); else if(alg_type == 2) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,PRIMAL_ALG); else if(alg_type == 3) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,DUAL_ALG); else if(alg_type == 4) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,DUAL_CACHE_ALG); else exit(1); /* Warning: The model contains references to the original data 'docs'. If you want to free the original data, and only keep the model, you have to make a deep copy of 'model'. */ if(struct_verbosity>=1) { printf("Writing learned model...");fflush(stdout); } write_struct_model(modelfile,&structmodel,&struct_parm); if(struct_verbosity>=1) { printf("done\n");fflush(stdout); } free_struct_sample(sample); free_struct_model(structmodel); /* Allow the API to perform whatever cleanup is required. */ api_finalize(); return 0; }
/*
 * Evaluate a (latent) structural SVM on a test set: classify every example,
 * accumulate the task loss, and report average loss and zero/one error.
 */
int main(int argc, char* argv[]) {
  double avgloss,l;        /* running loss sum / per-example loss */
  long i, correct;         /* loop index / count of zero-loss examples */
  char testfile[1024];     /* file with test examples */
  char modelfile[1024];    /* file with learned model */
  STRUCTMODEL model;
  STRUCT_LEARN_PARM sparm;
  LEARN_PARM lparm;
  KERNEL_PARM kparm;
  SAMPLE testsample;
  LABEL y;                 /* predicted label, allocated per example */
  LATENT_VAR h;            /* imputed latent variable, allocated per example */

  /* read input parameters */
  read_input_parameters(argc,argv,testfile,modelfile,&sparm);

  /* read model file */
  printf("Reading model..."); fflush(stdout);
  // model = read_struct_model(modelfile, &sparm);
  /* NOTE(review): read_struct_model() above is commented out, so 'model'
     reaches init_struct_model/classify_struct_example without the learned
     weights being loaded — looks like a local experiment; confirm before
     relying on the reported numbers. */
  printf("done.\n");

  /* read test examples */
  printf("Reading test examples..."); fflush(stdout);
  testsample = read_struct_examples(testfile,&sparm);
  printf("done.\n");

  init_struct_model(testsample,&model,&sparm,&lparm,&kparm);

  avgloss = 0.0;
  correct = 0;
  for (i=0;i<testsample.n;i++) {
    /* predict label y and latent variable h for example i */
    classify_struct_example(testsample.examples[i].x,&y,&h,&model,&sparm);
    l = loss(testsample.examples[i].y,y,h,&sparm);
    avgloss += l;
    if (l==0) correct++;
    /* y and h were allocated by classify_struct_example — release them */
    free_label(y);
    free_latent_var(h);
  }

  /* NOTE(review): divides by testsample.n without a guard — an empty test
     set produces NaN here. */
  printf("Average loss on test set: %.4f\n", avgloss/testsample.n);
  printf("Zero/one error on test set: %.4f\n", 1.0 - ((float) correct)/testsample.n);

  free_struct_sample(testsample);
  free_struct_model(model,&sparm);
  return(0);
}
int main(int argc, char* argv[]) { double *scores = NULL; long i; char testfile[1024]; char modelfile[1024]; char scoreFile[1024]; FILE *fscore; STRUCTMODEL model; STRUCT_LEARN_PARM sparm; LEARN_PARM lparm; KERNEL_PARM kparm; SAMPLE testsample; /* read input parameters */ read_input_parameters(argc,argv,testfile,modelfile,scoreFile,&sparm); fscore = fopen(scoreFile,"w"); /* read model file */ printf("Reading model..."); fflush(stdout); model = read_struct_model(modelfile, &sparm); printf("done.\n"); /* read test examples */ printf("Reading test examples..."); fflush(stdout); testsample = read_struct_test_examples(testfile,&sparm); printf("done.\n"); init_struct_model(testsample,&model,&sparm,&lparm,&kparm); scores = classify_struct_example(testsample.examples[0].x,&model); for(i = 0; i < (testsample.examples[0].n_pos+testsample.examples[0].n_neg); i++){ fprintf(fscore, "%0.5f\n", scores[i]); } fclose(fscore); //free_struct_sample(testsample); TODO: Uncomment this, and fix this function. It frees h.h_is which was never allocated while classifying. free_struct_model(model,&sparm); return(0); }
int main(int argc, char* argv[]) { // The file to create the online version of the code printf("Runs with F1 loss in the loss-augmented objective .. only positive data .. with weighting of Fscores .. no regions file"); // double *w; /* weight vector */ double C, epsilon, Cdash; LEARN_PARM learn_parm; KERNEL_PARM kernel_parm; char trainfile[1024]; char modelfile[1024]; int MAX_ITER; SAMPLE sample; STRUCT_LEARN_PARM sparm; STRUCTMODEL sm; /* read input parameters */ my_read_input_parameters(argc, argv, trainfile, modelfile, &learn_parm, &kernel_parm, &sparm); epsilon = learn_parm.eps; C = learn_parm.svm_c; Cdash = learn_parm.Cdash; MAX_ITER = learn_parm.maxiter; /* read in examples */ //strcpy(trainfile, "dataset/reidel_trainSVM.small.data"); sample = read_struct_examples(trainfile,&sparm); /* initialization */ init_struct_model(sample,&sm,&sparm,&learn_parm,&kernel_parm); // (OnlineSVM : Commenting 'w' as they are replaced by 'w_iters' // w = create_nvector(sm.sizePsi); // clear_nvector(w, sm.sizePsi); // sm.w = w; /* establish link to w, as long as w does not change pointer */ double *zeroes = create_nvector(sm.sizePsi); clear_nvector(zeroes, sm.sizePsi); // printf("Addr. of w (init) %x\t%x\n",w,sm.w); time_t time_start_full, time_end_full; int eid,totalEpochs=learn_parm.totalEpochs; int chunkid, numChunks=learn_parm.numChunks; double primal_obj_sum, primal_obj; char chunk_trainfile[1024]; SAMPLE * chunk_dataset = (SAMPLE *) malloc(sizeof(SAMPLE)*numChunks); /** * If we have ‘k’ instances and do ‘n’ epochs, after processing each chunk we update the weight. * Since we do ‘k’ updates, we will have ‘k’ weight vectors after each epoch. * After ‘n’ epochs, we will have ‘k*n’ weight vectors. 
*/ // -------------------------------------------------------------------------------------------------------------------------------- double ***w_iters = (double**) malloc(totalEpochs*sizeof(double**)); // printf("--2: After 1st malloc -- %x; sz = %d\n", w_iters, totalEpochs*sizeof(double**)); for(eid = 0; eid < totalEpochs; eid++){ w_iters[eid] = (double*) malloc(numChunks*sizeof(double*)); // printf("2.5... id = %d, .. allocated ... %x; sz = %d\n",eid, w_iters[eid],numChunks*sizeof(double*)); } printf("--3: After 2nd malloc \n"); for(eid = 0; eid < totalEpochs; eid++){ for(chunkid = 0; chunkid < numChunks; chunkid++){ w_iters[eid][chunkid] = create_nvector(sm.sizePsi); // printf("Confirming memory location : %x\n",w_iters[eid][chunkid]); clear_nvector(w_iters[eid][chunkid], sm.sizePsi); } } sm.w_iters = w_iters; printf("(ONLINE SVM) Completed the memory alloc for the parameters\n"); // -------------------------------------------------------------------------------------------------------------------------------- /** * Having divided the dataset (X,Y) into set of 'k' chunks / sub-datasets (X_1,Y_1) ... (X_k, Y_k) * Do the following do while routine for one set of datapoints (sub-datasets) */ // -------------------------------------------------------------------------------------------------------------------------------- printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX Changed .... 
Calling Java to split dataset\n"); char *cmd = malloc(1000); strcpy(cmd,"java -Xmx1G -cp java/bin:java/lib/* " " javaHelpers.splitDataset "); strcat(cmd, trainfile); strcat(cmd, " "); char numChunks_str[10]; sprintf(numChunks_str, "%d", numChunks); strcat(cmd, numChunks_str); strcat(cmd, " "); printf("Executing cmd : %s\n", cmd);fflush(stdout); system(cmd); // -------------------------------------------------------------------------------------------------------------------------------- for(chunkid = 0; chunkid < numChunks; chunkid++) { memset(chunk_trainfile, 0, 1024); strcat(chunk_trainfile,trainfile); strcat(chunk_trainfile,".chunks/chunk."); // NOTE: Name hard-coded according to the convention used to create chunked files char chunkid_str[10];sprintf(chunkid_str, "%d", chunkid); strcat(chunk_trainfile,chunkid_str); printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX Changed .... Reading chunked dataset\n"); printf("Chunk trainfile : %s\n",chunk_trainfile); chunk_dataset[chunkid] = read_struct_examples_chunk(chunk_trainfile); } time(&time_start_full); for(eid = 0; eid < totalEpochs; eid++) { printf("(ONLINE LEARNING) : EPOCH %d\n",eid); primal_obj_sum = 0.0; for(chunkid = 0; chunkid < numChunks; chunkid++) // NOTE: Chunkid starts from 1 and goes upto numChumks { int sz = sample.n / numChunks; int datasetStartIdx = (chunkid) * sz; int chunkSz = (numChunks-1 == chunkid) ? 
(sample.n - ((numChunks-1)*sz) ) : (sz); primal_obj = optimizeMultiVariatePerfMeasure(chunk_dataset[chunkid], datasetStartIdx, chunkSz, &sm, &sparm, C, Cdash, epsilon, MAX_ITER, &learn_parm, trainfile, w_iters, eid, chunkid, numChunks, zeroes); printf("(ONLINE LEARNING) : FINISHED PROCESSING CHUNK (PSEUDO-DATAPOINT) %d of %d\n",chunkid+1, numChunks); primal_obj_sum += primal_obj; printf("(OnlineSVM) : Processed pseudo-datapoint -- primal objective sum: %.4f\n", primal_obj_sum); } // After the completion of one epoch, warm start the 2nd epoch with the values of the // weight vectors seen at the end of the last chunk in previous epoch if(eid + 1 < totalEpochs){ //init w_iters[eid+1][0] to w_iters[eid][numChunks-1] copy_vector(w_iters[eid+1][0], w_iters[eid][numChunks-1], sm.sizePsi); printf("(ONLINE LEARNING) : WARM START ACROSS EPOCHS ..... DONE....\n"); } printf("(OnlineSVM) : EPOCH COMPLETE -- primal objective: %.4f\n", primal_obj); printf("(ONLINE LEARNING) : EPOCH %d DONE! .....\n",eid); } time(&time_end_full); char msg[20]; sprintf(msg,"(ONLINE LEARNING) : Total Time Taken : "); print_time(time_start_full, time_end_full, msg); printf("(ONLINE LEARNING) Reached here\n"); /* write structural model */ write_struct_model_online(modelfile, &sm, &sparm, totalEpochs, numChunks); // skip testing for the moment printf("(ONLINE LEARNING) Complete dumping\n"); /* free memory */ //TODO: Need to change this ... free_struct_sample(sample); free_struct_model(sm, &sparm); return(0); }
/*
 * svm_struct classification driver: load a learned model, classify every
 * example in the test file, write predictions to the predictions file, and
 * print runtime plus loss/error statistics.
 */
int main (int argc, char* argv[])
{
  long correct=0,incorrect=0,no_accuracy=0;  /* counters; no_accuracy set when data is unlabeled */
  long i;
  double t1,runtime=0;   /* accumulated classification time in get_runtime() units */
  double avgloss=0,l;    /* running loss sum / per-example loss */
  FILE *predfl;          /* predictions output file */
  STRUCTMODEL model;
  STRUCT_LEARN_PARM sparm;
  STRUCT_TEST_STATS teststats;
  SAMPLE testsample;
  LABEL y;               /* predicted label, allocated per example */

  svm_struct_classify_api_init(argc,argv);

  read_input_parameters(argc,argv,testfile,modelfile,predictionsfile,&sparm,
                        &verbosity,&struct_verbosity);

  if(struct_verbosity>=1) {
    printf("Reading model..."); fflush(stdout);
  }
  model=read_struct_model(modelfile,&sparm);
  if(struct_verbosity>=1) {
    fprintf(stdout, "done.\n");
  }

  if(model.svm_model->kernel_parm.kernel_type == LINEAR) { /* linear kernel */
    /* compute weight vector */
    /* NOTE(review): the weight-vector expansion below is disabled, so for
       linear models model.w is not set here — confirm the classification
       code does not depend on it. */
    //add_weight_vector_to_linear_model(model.svm_model);
    //model.w=model.svm_model->lin_weights;
  }

  if(struct_verbosity>=1) {
    printf("Reading test examples..."); fflush(stdout);
  }
  testsample=read_struct_examples(testfile,&sparm);
  if(struct_verbosity>=1) {
    printf("done.\n"); fflush(stdout);
  }

  if(struct_verbosity>=1) {
    printf("Classifying test examples..."); fflush(stdout);
  }

  if ((predfl = fopen (predictionsfile, "w")) == NULL) {
    perror (predictionsfile);
    exit (1);
  }

  for(i=0;i<testsample.n;i++) {
    /* time only the classification call, not the I/O */
    t1=get_runtime();
    y=classify_struct_example(testsample.examples[i].x,&model,&sparm);
    runtime+=(get_runtime()-t1);

    write_label(predfl,y);
    l=loss(testsample.examples[i].y,y,&sparm);
    avgloss+=l;
    if(l == 0) correct++;
    else incorrect++;
    eval_prediction(i,testsample.examples[i],y,&model,&sparm,&teststats);

    if(empty_label(testsample.examples[i].y)) {
      no_accuracy=1;
    } /* test data is not labeled */

    if(struct_verbosity>=2) {
      if((i+1) % 100 == 0) {
        printf("%ld..",i+1); fflush(stdout);
      }
    }
    free_label(y);
  }
  /* NOTE(review): no guard for testsample.n == 0 before this division */
  avgloss/=testsample.n;
  fclose(predfl);

  if(struct_verbosity>=1) {
    printf("done\n");
    /* get_runtime() ticks are hundredths of a second, hence /100.0 */
    printf("Runtime (without IO) in cpu-seconds: %.2f\n",
           (float)(runtime/100.0));
  }
  if((!no_accuracy) && (struct_verbosity>=1)) {
    printf("Average loss on test set: %.4f\n",(float)avgloss);
    printf("Zero/one-error on test set: %.2f%% (%ld correct, %ld incorrect, %d total)\n",(float)100.0*incorrect/testsample.n,correct,incorrect,testsample.n);
  }

  print_struct_testing_stats(testsample,&model,&sparm,&teststats);
  free_struct_sample(testsample);
  free_struct_model(model);

  svm_struct_classify_api_exit();
  return(0);
}
/*
 * Iterated SSVM training with per-example loss reweighting: repeatedly
 * trains a structural SVM, updates per-example (and per-edge) alpha
 * coefficients from the learned model, appends them to alphas.txt /
 * alphas_edge.txt, and finally exports combined segmentations for all
 * training examples.
 */
int main (int argc, char* argv[])
{
  SAMPLE sample;                  /* training sample */
  LEARN_PARM learn_parm;
  KERNEL_PARM kernel_parm;
  STRUCT_LEARN_PARM struct_parm;
  STRUCTMODEL structmodel;
  int alg_type;

  svm_struct_learn_api_init(argc,argv);

  read_input_parameters(argc,argv,trainfile,modelfile,&verbosity,
                        &struct_verbosity,&struct_parm,&learn_parm,
                        &kernel_parm,&alg_type);

  if(struct_verbosity>=1) {
    //verbose = true;
    printf("Reading training examples...");
    fflush(stdout);
  }
  /* read the training examples */
  sample=read_struct_examples(trainfile,&struct_parm);
  if(struct_verbosity>=1) {
    printf("done\n");
    fflush(stdout);
  }

  /* configuration: loss-function updating must be enabled for this driver */
  string config_tmp;
  bool update_loss_function = false;
  if(Config::Instance()->getParameter("update_loss_function", config_tmp)) {
    update_loss_function = config_tmp.c_str()[0] == '1';
  }
  printf("[Main] update_loss_function=%d\n", (int)update_loss_function);
  if(!update_loss_function) {
    printf("update_loss_function should be true\n");
    exit(-1);
  }

  eUpdateType updateType = UPDATE_NODE_EDGE;
  if(Config::Instance()->getParameter("update_type", config_tmp)) {
    updateType = (eUpdateType)atoi(config_tmp.c_str());
  }
  printf("[Main] update_type=%d\n", (int)updateType);

  mkdir(loss_dir, 0777);

  // check if parameter vector files exist
  const char* parameter_vector_dir = "parameter_vector";
  int idx = 0;
  string parameter_vector_dir_last = findLastFile(parameter_vector_dir, "", &idx);
  string parameter_vector_file_pattern = parameter_vector_dir_last + "/iteration_";
  int idx_1 = 1;
  string parameter_vector_file_last = findLastFile(parameter_vector_file_pattern, ".txt", &idx_1);
  printf("[Main] Checking parameter vector file %s\n", parameter_vector_file_last.c_str());

  // vectors used to store RF weights
  vector<double>* alphas = new vector<double>[sample.n];
  vector<double>* alphas_edge = 0;  /* only allocated for node+edge updates */
  if(updateType == UPDATE_NODE_EDGE) {
    alphas_edge = new vector<double>[sample.n];
  }
  int alphas_idx = 0;  /* index of the current alpha column */

  if(fileExists("alphas.txt") && fileExists(parameter_vector_file_last)) {
    // Loading alpha coefficients
    ifstream ifs("alphas.txt");
    string line;
    int lineIdx = 0;
    while(lineIdx < sample.n && getline(ifs, line)) {
      vector<string> tokens;
      splitString(line, tokens);
      for(vector<string>::iterator it = tokens.begin();
          it != tokens.end(); ++it) {
        /* NOTE(review): atoi() truncates to an integer even though alphas
           holds doubles — confirm whether atof() was intended. */
        alphas[lineIdx].push_back(atoi(it->c_str()));
      }
      ++lineIdx;
    }
    ifs.close();
    if(lineIdx > 0) {
      alphas_idx = alphas[0].size();
    }

    // Loading parameters
    printf("[Main] Found parameter vector file %s\n", parameter_vector_file_last.c_str());
    struct_parm.ssvm_iteration = idx + 1;  /* resume after the last saved iteration */
    update_output_dir(struct_parm.ssvm_iteration);
    //EnergyParam param(parameter_vector_file_last.c_str());
    //updateCoeffs(sample, param, struct_parm, updateType, alphas, alphas_idx);
    //alphas_idx = 1;
  } else {
    struct_parm.ssvm_iteration = 0;

    // insert alpha coefficients for first iteration
    for(int i = 0; i < sample.n; ++i) {
      alphas[i].push_back(1);
    }
    ofstream ofs("alphas.txt", ios::app);
    int i = 0;
    for(; i < sample.n - 1; ++i) {
      ofs << alphas[i][alphas_idx] << " ";
    }
    if(i < sample.n) {
      ofs << alphas[i][alphas_idx];
    }
    ofs << endl;
    ofs.close();

    // edges
    /* NOTE(review): alphas_edge is only allocated when updateType ==
       UPDATE_NODE_EDGE, but this branch dereferences it unconditionally —
       a node-only configuration would crash here. Verify intended usage. */
    for(int i = 0; i < sample.n; ++i) {
      alphas_edge[i].push_back(1);
    }
    ofstream ofse("alphas_edge.txt", ios::app);
    i = 0;
    for(; i < sample.n - 1; ++i) {
      ofse << alphas_edge[i][alphas_idx] << " ";
    }
    if(i < sample.n) {
      ofse << alphas_edge[i][alphas_idx];
    }
    ofse << endl;
    ofse.close();
    ++alphas_idx;
  }

  const int nMaxIterations = 5;  /* -1 would mean "iterate forever" */
  bool bIterate = true;
  do {
    printf("-----------------------------------------SSVM-ITERATION-%d-START\n", struct_parm.ssvm_iteration);
    struct_parm.iterationId = 1;

    /* Do the learning and return structmodel. */
    if(alg_type == 0)
      svm_learn_struct(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,NSLACK_ALG);
    else if(alg_type == 1)
      svm_learn_struct(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,NSLACK_SHRINK_ALG);
    else if(alg_type == 2)
      svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,ONESLACK_PRIMAL_ALG);
    else if(alg_type == 3)
      svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,ONESLACK_DUAL_ALG);
    else if(alg_type == 4)
      svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,ONESLACK_DUAL_CACHE_ALG);
    else if(alg_type == 9)
      svm_learn_struct_joint_custom(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel);
    else
      exit(1);

    /* model file name carries the ssvm iteration number */
    char _modelfile[BUFFER_SIZE];
    //sprintf(_modelfile, "%s_%d", modelfile, struct_parm.ssvm_iteration);
    sprintf(_modelfile, "%s_%d", modelfile, struct_parm.ssvm_iteration);
    printf("[Main] Writing learned model to %s\n", _modelfile);
    write_struct_model(_modelfile, &structmodel, &struct_parm);

    // Run inference on training data and increase loss for misclassified points
    printf("[Main] Loading learned model to %s\n", _modelfile);
    EnergyParam param(_modelfile);
    updateCoeffs(sample, param, struct_parm, updateType, alphas, alphas_edge,
                 struct_parm.ssvm_iteration + 1);

    /* append the new alpha column for nodes */
    ofstream ofs("alphas.txt", ios::app);
    int i = 0;
    for(; i < sample.n - 1; ++i) {
      ofs << alphas[i][alphas_idx] << " ";
    }
    if(i < sample.n) {
      ofs << alphas[i][alphas_idx];
    }
    ofs << endl;
    ofs.close();

    /* append the new alpha column for edges */
    ofstream ofse("alphas_edge.txt", ios::app);
    i = 0;
    for(; i < sample.n - 1; ++i) {
      ofse << alphas_edge[i][alphas_idx] << " ";
    }
    if(i < sample.n) {
      ofse << alphas_edge[i][alphas_idx];
    }
    ofse << endl;
    ofse.close();
    ++alphas_idx;

    printf("-----------------------------------------SSVM-ITERATION-%d-END\n", struct_parm.ssvm_iteration);
    ++struct_parm.ssvm_iteration;
    bIterate = (nMaxIterations == -1 || struct_parm.ssvm_iteration < nMaxIterations);
  } while(bIterate);

  // Output final segmentation for all examples
  long nExamples = sample.n;
  int nRFs = struct_parm.ssvm_iteration;  /* number of trained models */
  double* lossPerLabel = 0;
  labelType* groundTruthLabels = 0;
  for(int i = 0; i < nExamples; i++) { /*** example loop ***/
    Slice_P* slice = sample.examples[i].x.slice;
    Feature* feature = sample.examples[i].x.feature;
    //map<sidType, nodeCoeffType>* nodeCoeffs = sample.examples[i].x.nodeCoeffs;
    //map<sidType, edgeCoeffType>* edgeCoeffs = sample.examples[i].x.edgeCoeffs;
    map<sidType, nodeCoeffType>* nodeCoeffs = 0;
    map<sidType, edgeCoeffType>* edgeCoeffs = 0;
    int nNodes = slice->getNbSupernodes();

    /* probability map output */
    stringstream soutPb;
    soutPb << loss_dir;
    soutPb << "pb_";
    soutPb << getNameFromPathWithoutExtension(slice->getName());
    soutPb << "_";
    soutPb << "combined";
    //soutPb << setw(5) << setfill('0') << ssvm_iteration;
    soutPb << ".tif";
    printf("[Main] Exporting %s\n", soutPb.str().c_str());
    labelType* inferredLabels =
      computeCombinedLabels(slice, feature, groundTruthLabels, lossPerLabel,
                            nRFs, alphas, i, nodeCoeffs, edgeCoeffs,
                            soutPb.str().c_str());

    /* overlay output */
    stringstream sout;
    sout << loss_dir;
    sout << getNameFromPathWithoutExtension(slice->getName());
    sout << "_";
    sout << "combined";
    //sout << setw(5) << setfill('0') << ssvm_iteration;
    sout << ".tif";
    printf("[Main] Exporting %s\n", sout.str().c_str());
    slice->exportOverlay(sout.str().c_str(), inferredLabels);

    /* black & white label output */
    stringstream soutBW;
    soutBW << loss_dir;
    soutBW << getNameFromPathWithoutExtension(slice->getName());
    soutBW << "_";
    soutBW << "combined";
    //soutBW << setw(5) << setfill('0') << ssvm_iteration;
    soutBW << "_BW.tif";
    printf("[Main] Exporting %s\n", soutBW.str().c_str());
    slice->exportSupernodeLabels(soutBW.str().c_str(), struct_parm.nClasses,
                                 inferredLabels, nNodes,
                                 &(struct_parm.labelToClassIdx));
    delete[] inferredLabels;
  }

  free_struct_sample(sample);
  free_struct_model(structmodel);
  svm_struct_learn_api_exit();
  return 0;
}
/*
 * Self-paced learning driver for latent structural SVMs: reads training
 * examples, optionally splits off a validation set, caches psi feature
 * vectors for the ground-truth labels, runs the biconvex alternate convex
 * search, and writes the learned model.
 */
int main(int argc, char* argv[])
{
  double *w; /* weight vector */
  long m, i;               /* number of training examples / loop index */
  double C, epsilon;
  LEARN_PARM learn_parm;
  KERNEL_PARM kernel_parm;
  char trainfile[1024];
  char modelfile[1024];
  int MAX_ITER;
  /* new struct variables */
  SVECTOR **fycache, *diff, *fy;  /* cached psi vectors for correct labels */
  EXAMPLE *ex;
  SAMPLE alldata;          /* full dataset as read from disk */
  SAMPLE sample;           /* training portion */
  SAMPLE val;              /* validation portion (unused when ntrain == alldata.n) */
  STRUCT_LEARN_PARM sparm;
  STRUCTMODEL sm;
  double primal_obj;
  double stop_crit;        /* NOTE(review): declared but never used here */
  char itermodelfile[2000];  /* NOTE(review): declared but never used here */

  /* self-paced learning variables */
  double init_spl_weight;
  double spl_weight;
  double spl_factor;       /* NOTE(review): read from options but unused below */
  int *valid_examples;     /* per-example inclusion flags filled by the solver */

  /* read input parameters */
  my_read_input_parameters(argc, argv, trainfile, modelfile, &learn_parm,
                           &kernel_parm, &sparm, &init_spl_weight, &spl_factor);
  epsilon = learn_parm.eps;
  C = learn_parm.svm_c;
  MAX_ITER = learn_parm.maxiter;

  /* read in examples */
  alldata = read_struct_examples(trainfile,&sparm);
  int ntrain = (int) round(1.0*alldata.n); /* no validation set */
  if(ntrain < alldata.n) {
    /* never taken with the 1.0 factor above; kept for experimenting with
       train/validation splits */
    long *perm = randperm(alldata.n);
    sample = generate_train_set(alldata, perm, ntrain);
    val = generate_validation_set(alldata, perm, ntrain);
    free(perm);
  } else {
    sample = alldata;
  }
  ex = sample.examples;
  m = sample.n;

  /* initialization */
  init_struct_model(alldata,&sm,&sparm,&learn_parm,&kernel_parm);

  w = create_nvector(sm.sizePsi);
  clear_nvector(w, sm.sizePsi);
  sm.w = w; /* establish link to w, as long as w does not change pointer */

  /* some training information */
  printf("C: %.8g\n", C);
  printf("spl weight: %.8g\n",init_spl_weight);
  printf("epsilon: %.8g\n", epsilon);
  /* NOTE(review): sample.n printed with %d — confirm its declared type
     matches (a long would need %ld) */
  printf("sample.n: %d\n", sample.n);
  printf("sm.sizePsi: %ld\n", sm.sizePsi);
  fflush(stdout);

  /* prepare feature vector cache for correct labels with imputed latent variables */
  fycache = (SVECTOR**)malloc(m*sizeof(SVECTOR*));
  for (i=0;i<m;i++) {
    fy = psi(ex[i].x, ex[i].y, &sm, &sparm);
    /* collapse the psi list into a single summed vector */
    diff = add_list_ss(fy);
    free_svector(fy);
    fy = diff;
    fycache[i] = fy;
  }

  /* learn initial weight vector using all training examples */
  valid_examples = (int *) malloc(m*sizeof(int));

  /* errors for validation set */
  double cur_loss, best_loss = DBL_MAX;
  int loss_iter;           /* NOTE(review): declared but never used here */

  /* initializations */
  spl_weight = init_spl_weight;

  /* solve biconvex self-paced learning problem */
  primal_obj = alternate_convex_search(w, m, MAX_ITER, C, epsilon, fycache,
                                       ex, &sm, &sparm, valid_examples,
                                       spl_weight);
  printf("primal objective: %.4f\n", primal_obj);
  fflush(stdout);
  //alternate_convex_search(w, m, MAX_ITER, C, epsilon, fycache, ex, &sm, &sparm, valid_examples, spl_weight);

  /* count how many examples the solver kept active */
  int nValid = 0;
  for (i=0;i<m;i++) {
    if(valid_examples[i]) {
      nValid++;
    }
  }

  if(ntrain < alldata.n) {
    cur_loss = compute_current_loss(val,&sm,&sparm);
    printf("CURRENT LOSS: %f\n",cur_loss);
  }

  /* write structural model */
  write_struct_model(modelfile, &sm, &sparm);
  // skip testing for the moment

  /* free memory */
  free_struct_sample(alldata);
  if(ntrain < alldata.n) {
    free(sample.examples);
    free(val.examples);
  }
  free_struct_model(sm, &sparm);
  for(i=0;i<m;i++) {
    free_svector(fycache[i]);
  }
  free(fycache);
  free(valid_examples);

  return(0);
}
/*
 * Dual-build classifier entry point: compiled as a MATLAB MEX gateway when
 * COMPILE_MEX_INTERFACE is defined, otherwise as a standalone main (the
 * alternative signature and its #if live above this excerpt — the stray
 * #endif below closes that conditional).
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
#endif
{
  long correct=0,incorrect=0,no_accuracy=0;  /* counters; no_accuracy set when data is unlabeled */
  long i;
  double t1,runtime=0;   /* accumulated classification time in get_runtime() units */
  double avgloss=0,l;
#ifndef COMPILE_MEX_INTERFACE
  FILE *predfl;          /* predictions output file (standalone build only) */
#endif
  STRUCTMODEL model;
  STRUCT_LEARN_PARM sparm;
  STRUCT_TEST_STATS teststats;
  SAMPLE testsample;
  LABEL y;

#ifdef COMPILE_MEX_INTERFACE
  /* MEX build: synthesize argc/argv from the optional 4th MATLAB argument */
  int argc;
  char **argv;
  if (nrhs < 3) {
    print_help();
    return;
  }
  else if (nrhs==3) {
    argc=1;
    argv=(char **)my_malloc(MAX_ARGVS*sizeof(char *));
    argv[0]="OLR";
  }
  else create_argc_argv(prhs[3],&argc,&argv);
#endif

  svm_struct_classify_api_init(argc,argv);

#ifndef COMPILE_MEX_INTERFACE
  read_input_parameters(argc,argv,testfile,modelfile,predictionsfile,&sparm,
                        &verbosity,&struct_verbosity);
#else
  read_input_parameters(argc,argv,&sparm,&verbosity,&struct_verbosity);
#endif

  if(struct_verbosity>=1) {
    printf("Reading model..."); fflush(stdout);
  }
#ifndef COMPILE_MEX_INTERFACE
  model=read_struct_model(modelfile,&sparm);
#else
  /* MEX build: the model comes in as the 3rd MATLAB argument */
  model=read_struct_model(prhs[2],&sparm);
#endif
  if(struct_verbosity>=1) {
    fprintf(stdout, "done.\n");
  }

  if(model.svm_model->kernel_parm.kernel_type == LINEAR) { /* linear kernel */
    /* compute weight vector */
    add_weight_vector_to_linear_model(model.svm_model);
    model.w=model.svm_model->lin_weights;
  }

  if(struct_verbosity>=1) {
    printf("Reading test examples..."); fflush(stdout);
  }
#ifndef COMPILE_MEX_INTERFACE
  testsample=read_struct_examples(testfile,&sparm);
#else
  testsample=read_struct_examples(prhs,&sparm);
#endif
  if(struct_verbosity>=1) {
    printf("done.\n"); fflush(stdout);
  }

  if(struct_verbosity>=1) {
    printf("Classifying test examples..."); fflush(stdout);
  }

#ifndef COMPILE_MEX_INTERFACE
  if ((predfl = fopen (predictionsfile, "w")) == NULL) {
    perror (predictionsfile);
    exit (1);
  }
#else
  /* MEX build: predictions are returned as a rows-by-1 double matrix */
  mwSize rows=mxGetM(prhs[0]);
  mxArray *predictions=mxCreateDoubleMatrix(rows,1,mxREAL);
  double *pred_ptr=mxGetPr(predictions);
#endif

  for(i=0;i<testsample.n;i++) {
    /* time only the classification call, not the I/O */
    t1=get_runtime();
    y=classify_struct_example(testsample.examples[i].x,&model,&sparm);
    runtime+=(get_runtime()-t1);

#ifndef COMPILE_MEX_INTERFACE
    write_label(predfl,y);
#else
    write_label(&pred_ptr,y);
#endif
    l=loss(testsample.examples[i].y,y,&sparm);
    avgloss+=l;
    if(l == 0) correct++;
    else incorrect++;
    eval_prediction(i,testsample.examples[i],y,&model,&sparm,&teststats);

    if(empty_label(testsample.examples[i].y)) {
      no_accuracy=1;
    } /* test data is not labeled */

    if(struct_verbosity>=2) {
      if((i+1) % 100 == 0) {
        printf("%ld..",i+1); fflush(stdout);
      }
    }
    free_label(y);
  }
  /* NOTE(review): no guard for testsample.n == 0 before this division */
  avgloss/=testsample.n;

#ifndef COMPILE_MEX_INTERFACE
  fclose(predfl);
#endif

  if(struct_verbosity>=1) {
    printf("done\n");
    /* get_runtime() ticks are hundredths of a second, hence /100.0 */
    printf("Runtime (without IO) in cpu-seconds: %.2f\n",
           (float)(runtime/100.0));
  }
  if((!no_accuracy) && (struct_verbosity>=1)) {
    printf("Average loss on test set: %.4f\n",(float)avgloss);
    printf("Zero/one-error on test set: %.2f%% (%ld correct, %ld incorrect, %d total)\n",(float)100.0*incorrect/testsample.n,correct,incorrect,testsample.n);
  }

  print_struct_testing_stats(testsample,&model,&sparm,&teststats);
  free_struct_sample(testsample);
  free_struct_model(model);

  svm_struct_classify_api_exit();

#ifndef COMPILE_MEX_INTERFACE
  return(0);
#else
  plhs[0]=predictions;
#endif
}
/*
 * MPI/GPU training driver for a hierarchical part-based model over CAD
 * templates.
 *
 * Phases (each separated by MPI barriers so rank 0 can write shared
 * temp/constraint files that all ranks then read):
 *   1. If a single CAD model is given (cad_num == 1), optionally train a
 *      root/aspectlet template per part, mine hard negatives, and save the
 *      accumulated constraints; then train the full model (again with
 *      hard-negative mining) and let rank 0 write "<cls>.mod".
 *   2. For every additional CAD model (o >= 1), train a per-aspectlet model
 *      the same way and write "<cls>_cad%03d.mod".
 *
 * Rank-0-only steps: example selection (writes temp_part.dat),
 * constraint saving/writing, model writing.  All ranks train.
 *
 * NOTE(review): trainfile/cadfile/testfile/modelfile are not declared here —
 * presumably file-scope buffers filled by read_input_parameters; confirm.
 * `filename` is declared but never used in this function.
 */
int main (int argc, char* argv[])
{
  int o, i, j, cad_num;
  char filename[256];
  CAD **cads, *cad;
  SAMPLE sample, sample_part; /* training sample */
  LEARN_PARM learn_parm;
  KERNEL_PARM kernel_parm;
  STRUCT_LEARN_PARM struct_parm;
  STRUCTMODEL structmodel;
  int alg_type;
  int rank;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  /* select GPU */
  select_gpu(rank);                     /* one GPU per MPI rank */

  svm_struct_learn_api_init(argc,argv);
  read_input_parameters(argc, argv, trainfile, cadfile, testfile, &verbosity,
                        &struct_verbosity, &struct_parm, &learn_parm,
                        &kernel_parm, &alg_type);

  /* read cad models */
  cads = read_cad_model(cadfile, &cad_num, 0, &struct_parm);

  /* if cad_num == 1, train the final model */
  if(cad_num == 1)
  {
    printf("Train the hierarchical model\n");
    /* set the cad model for structmodel */
    structmodel.cad_num = 1;
    structmodel.cads = cads;
    printf("Read training samples\n");
    sample = read_struct_examples(trainfile, &struct_parm, &structmodel);
    printf("Read training samples done\n");
    if(struct_parm.is_root == 1 || struct_parm.is_aspectlet == 1)
    {
      /* first train weights for root parts */
      printf("Train root models\n");
      cad = cads[0];
      struct_parm.cad_index = 0;
      /* for each part */
      for(i = 0; i < cad->part_num; i++)
      {
        /* choose what parts to train: roots[i] == -1 selects plain root
           parts, roots[i] == 1 selects aspectlet parts */
        if((cad->roots[i] == -1 && struct_parm.is_root == 1) ||
           (cad->roots[i] == 1 && struct_parm.is_aspectlet == 1))
        {
          printf("Train part %d\n", i);
          /* training iteration index */
          struct_parm.iter = 0;
          struct_parm.part_index = i;
          /* select training samples for part (rank 0 writes temp_part.dat,
             all ranks wait and then read it) */
          if(rank == 0)
            select_examples_part(trainfile, sample, cad, 0, i);
          MPI_Barrier(MPI_COMM_WORLD);
          sample_part = read_struct_examples("temp_part.dat", &struct_parm, &structmodel);
          /* train the root template */
          struct_parm.deep = 0;
          /* Do the learning and return structmodel. */
          if(alg_type == 1)
            svm_learn_struct(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel);
          else if(alg_type == 2)
            svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, PRIMAL_ALG);
          else if(alg_type == 3)
            svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_ALG);
          else if(alg_type == 4)
            svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_CACHE_ALG);
          else
            exit(1);
          /* if aspectlet, train the subtree (deep=1) on freshly selected samples */
          if(cad->roots[i] == 1 && struct_parm.is_aspectlet == 1)
          {
            printf("Train subtree for part %d\n", i);
            struct_parm.deep = 1;
            struct_parm.iter++;
            /* select training samples for part */
            if(rank == 0)
              select_examples_part(trainfile, sample, cad, 0, i);
            MPI_Barrier(MPI_COMM_WORLD);
            free_struct_sample(sample_part);
            sample_part = read_struct_examples("temp_part.dat", &struct_parm, &structmodel);
            free_struct_model(structmodel);
            /* set the cad model for structmodel (must be re-attached after
               every free_struct_model, since learning reads it) */
            structmodel.cad_num = 1;
            structmodel.cads = cads;
            if(alg_type == 1)
              svm_learn_struct(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel);
            else if(alg_type == 2)
              svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, PRIMAL_ALG);
            else if(alg_type == 3)
              svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_ALG);
            else if(alg_type == 4)
              svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_CACHE_ALG);
            else
              exit(1);
          }
          /* data mining hard examples: retrain hard_negative times, each
             round adding mined negatives (temp.dat) to the training set */
          for(j = 0; j < struct_parm.hard_negative; j++)
          {
            /* increase iteration number */
            struct_parm.iter++;
            data_mining_hard_examples("temp_part.dat", testfile, &struct_parm, &structmodel);
            /* read the training examples */
            if(struct_verbosity>=1) {
              printf("Reading training examples..."); fflush(stdout);
            }
            free_struct_sample(sample_part);
            sample_part = read_struct_examples("temp.dat", &struct_parm, &structmodel);
            if(struct_verbosity>=1) {
              printf("done\n"); fflush(stdout);
            }
            /* Do the learning and return structmodel. */
            free_struct_model(structmodel);
            /* set the cad model for structmodel */
            structmodel.cad_num = 1;
            structmodel.cads = cads;
            if(alg_type == 1)
              svm_learn_struct(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel);
            else if(alg_type == 2)
              svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, PRIMAL_ALG);
            else if(alg_type == 3)
              svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_ALG);
            else if(alg_type == 4)
              svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_CACHE_ALG);
            else
              exit(1);
          }
          /* save constraints for training the full model */
          if(rank == 0)
            save_constraints(&struct_parm, &structmodel);
          MPI_Barrier(MPI_COMM_WORLD);
          free_struct_sample(sample_part);
          free_struct_model(structmodel);
          /* set the cad model for structmodel */
          structmodel.cad_num = 1;
          structmodel.cads = cads;
        }
      } /* end for each part */
      if(rank == 0)
      {
        /* persist the accumulated constraint set, then release it */
        write_constraints(struct_parm.cset, &struct_parm);
        free(struct_parm.cset.rhs);
        for(i = 0; i < struct_parm.cset.m; i++)
          free_example(struct_parm.cset.lhs[i], 1);
        free(struct_parm.cset.lhs);
      }
      MPI_Barrier(MPI_COMM_WORLD);
    } /* end if is_root == 1 */

    /* train the full model (deep=1, no specific cad/part index) */
    printf("Train the full model\n");
    struct_parm.iter = 0;
    struct_parm.cad_index = -1;
    struct_parm.part_index = -1;
    struct_parm.deep = 1;
    /* Do the learning and return structmodel. */
    if(alg_type == 1)
      svm_learn_struct(sample, &struct_parm, &learn_parm, &kernel_parm, &structmodel);
    else if(alg_type == 2)
      svm_learn_struct_joint(sample, &struct_parm, &learn_parm, &kernel_parm, &structmodel, PRIMAL_ALG);
    else if(alg_type == 3)
      svm_learn_struct_joint(sample, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_ALG);
    else if(alg_type == 4)
      svm_learn_struct_joint(sample, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_CACHE_ALG);
    else
      exit(1);
    /* data mining hard examples (hard_negative counts down to 0) */
    while(struct_parm.hard_negative > 0)
    {
      /* increase iteration number */
      struct_parm.iter++;
      data_mining_hard_examples(trainfile, testfile, &struct_parm, &structmodel);
      /* read the training examples */
      if(struct_verbosity>=1) {
        printf("Reading training examples..."); fflush(stdout);
      }
      free_struct_sample(sample);
      sample = read_struct_examples("temp.dat", &struct_parm, &structmodel);
      if(struct_verbosity>=1) {
        printf("done\n"); fflush(stdout);
      }
      /* Do the learning and return structmodel. */
      free_struct_model(structmodel);
      /* set the cad model for structmodel */
      structmodel.cad_num = 1;
      structmodel.cads = cads;
      if(alg_type == 1)
        svm_learn_struct(sample, &struct_parm, &learn_parm, &kernel_parm, &structmodel);
      else if(alg_type == 2)
        svm_learn_struct_joint(sample, &struct_parm, &learn_parm, &kernel_parm, &structmodel, PRIMAL_ALG);
      else if(alg_type == 3)
        svm_learn_struct_joint(sample, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_ALG);
      else if(alg_type == 4)
        svm_learn_struct_joint(sample, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_CACHE_ALG);
      else
        exit(1);
      struct_parm.hard_negative--;
    }
    if(rank == 0)
    {
      if(struct_verbosity >= 1) {
        printf("Writing learned model..."); fflush(stdout);
      }
      sprintf(modelfile, "%s.mod", struct_parm.cls);
      write_struct_model(modelfile, &structmodel, &struct_parm);
      if(struct_verbosity>=1) {
        printf("done\n"); fflush(stdout);
      }
    }
    MPI_Barrier(MPI_COMM_WORLD);
    free_struct_sample(sample);
    free_struct_model(structmodel);
  }

  /* train weights for aspectlets */
  /* start with the second aspectlet, the first one is the whole object */
  for(o = 1; o < cad_num; o++)
  {
    printf("Train aspectlet %d\n", o);
    cad = cads[o];
    /* set the cad model for structmodel (single-cad view onto cads[o]) */
    structmodel.cad_num = 1;
    structmodel.cads = &cad;
    /* training iteration index */
    struct_parm.iter = 0;
    struct_parm.cad_index = -1;
    struct_parm.part_index = -1;
    struct_parm.deep = 1;
    /* select training samples for the aspectlet */
    if(rank == 0)
      select_examples_aspectlet(trainfile, cads, o);
    MPI_Barrier(MPI_COMM_WORLD);
    printf("Read training samples\n");
    sample_part = read_struct_examples("temp_part.dat", &struct_parm, &structmodel);
    printf("Read training samples done\n");
    /* Do the learning and return structmodel. */
    if(alg_type == 1)
      svm_learn_struct(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel);
    else if(alg_type == 2)
      svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, PRIMAL_ALG);
    else if(alg_type == 3)
      svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_ALG);
    else if(alg_type == 4)
      svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_CACHE_ALG);
    else
      exit(1);
    /* data mining hard examples */
    for(i = 0; i < struct_parm.hard_negative; i++)
    {
      /* increase iteration number */
      struct_parm.iter++;
      data_mining_hard_examples("temp_part.dat", testfile, &struct_parm, &structmodel);
      /* read the training examples */
      if(struct_verbosity>=1) {
        printf("Reading training examples..."); fflush(stdout);
      }
      free_struct_sample(sample_part);
      sample_part = read_struct_examples("temp.dat", &struct_parm, &structmodel);
      if(struct_verbosity>=1) {
        printf("done\n"); fflush(stdout);
      }
      /* Do the learning and return structmodel. */
      free_struct_model(structmodel);
      /* set the cad model for structmodel */
      structmodel.cad_num = 1;
      structmodel.cads = &cad;
      if(alg_type == 1)
        svm_learn_struct(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel);
      else if(alg_type == 2)
        svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, PRIMAL_ALG);
      else if(alg_type == 3)
        svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_ALG);
      else if(alg_type == 4)
        svm_learn_struct_joint(sample_part, &struct_parm, &learn_parm, &kernel_parm, &structmodel, DUAL_CACHE_ALG);
      else
        exit(1);
    }
    if(rank == 0)
    {
      if(struct_verbosity>=1) {
        printf("Writing learned model..."); fflush(stdout);
      }
      sprintf(modelfile, "%s_cad%03d.mod", struct_parm.cls, o);
      write_struct_model(modelfile, &structmodel, &struct_parm);
      if(struct_verbosity>=1) {
        printf("done\n"); fflush(stdout);
      }
    }
    MPI_Barrier(MPI_COMM_WORLD);
    free_struct_sample(sample_part);
    free_struct_model(structmodel);
  } /* end for each cad model */

  /* release CAD models and shut down */
  for(i = 0; i < cad_num; i++)
    destroy_cad(cads[i]);
  free(cads);
  svm_struct_learn_api_exit();
  MPI_Finalize();
  return 0;
}
int main(int argc, char* argv[]) { double avghingeloss; LABEL y; long i, correct; double weighted_correct; char testfile[1024]; char modelfile[1024]; char labelfile[1024]; char latentfile[1024]; char scorefile[1024]; FILE *flabel; FILE *flatent; FILE *fscore; STRUCTMODEL model; STRUCT_LEARN_PARM sparm; SAMPLE testsample; /* read input parameters */ read_input_parameters(argc,argv,testfile,modelfile,labelfile,latentfile,scorefile,model.kernel_info_file,model.filestub, &sparm); printf("C: %f\n",sparm.C); flabel = fopen(labelfile,"w"); flatent = fopen(latentfile,"w"); fscore = fopen(scorefile, "w"); init_struct_model(model.kernel_info_file, &model, &sparm); read_struct_model(modelfile, &model); /* read test examples */ printf("Reading test examples..."); fflush(stdout); testsample = read_struct_examples(testfile, &model, &sparm); printf("done.\n"); IMAGE_KERNEL_CACHE ** cached_images = init_cached_images(testsample.examples,&model); avghingeloss = 0.0; correct = 0; weighted_correct=0.0; int *valid_example_kernel = (int *) malloc(5*sizeof(int)); for(i = 0; i < model.num_kernels; i++) valid_example_kernel[i] = 1; double total_example_weight = 0; int num_distinct_examples = 0; int last_image_id = -1; LATENT_VAR h = make_latent_var(&model); double * scores = (double *)calloc(sparm.n_classes, sizeof(double)); for (i=0;i<testsample.n;i++) { while (testsample.examples[i].x.image_id == last_image_id) i++; last_image_id = testsample.examples[i].x.image_id; num_distinct_examples++; // if(finlatent) { // read_latent_var(&h,finlatent); //printf("%d %d\n",h.position_x,h.position_y); // } //printf("%f\n",sparm.C); struct timeval start_time; struct timeval finish_time; gettimeofday(&start_time, NULL); classify_struct_example(testsample.examples[i].x,&y,&h,cached_images,&model,&sparm,1); gettimeofday(&finish_time, NULL); double microseconds = 1e6 * (finish_time.tv_sec - start_time.tv_sec) + (finish_time.tv_usec - start_time.tv_usec); //printf("This ESS call took %f milliseconds.\n", 
microseconds/1e3); total_example_weight += testsample.examples[i].x.example_cost; //double hinge_l = get_hinge_l_from_pos_score(pos_score,testsample.examples[i].y); //printf("with a pos_score of %f, a label of %d we get a hinge_l of %f\n", pos_score, testsample.examples[i].y.label, hinge_l); // double weighted_hinge_l = hinge_l * testsample.examples[i].x.example_cost; //avghingeloss += weighted_hinge_l; //if (hinge_l<1) { //A classification is considered "correct" if it guesses one of the objects in the image if (y.label == testsample.examples[i].y.label || testsample.examples[i].x.also_correct[y.label]) { correct++; weighted_correct+=testsample.examples[i].x.example_cost; } print_label(y, flabel); fprintf(flabel,"\n"); fflush(flabel); print_latent_var(testsample.examples[i].x, h, flatent); get_class_scores(testsample.examples[i].x, cached_images, scores, &model, &sparm); fprintf(fscore, "%s ", testsample.examples[i].x.image_path); for (int j = 0; j < sparm.n_classes; ++j) { fprintf(fscore, "%f ", scores[j]); } fprintf(fscore, "\n"); } free_latent_var(h); fclose(flabel); fclose(flatent); free(scores); //double w_cost = regularizaton_cost(model.w_curr.get_vec(), model.sizePsi); //avghingeloss = avghingeloss/testsample.n; printf("\n"); //printf("Objective Value with C=%f is %f\n\n\n", sparm.C, (sparm.C * avghingeloss) + w_cost); //printf("Average hinge loss on dataset: %.4f\n", avghingeloss); printf("Zero/one error on test set: %.4f\n", 1.0 - ((float) correct) / (1.0 * num_distinct_examples)); printf("Weighted zero/one error on the test set %.4f\n", 1.0 - (weighted_correct/total_example_weight)); printf("zeroone %.4f weightedzeroone %.4f\n", 1.0 - ((float) correct) / (1.0 * num_distinct_examples), 1.0 - (weighted_correct/total_example_weight)); fclose(fscore); free_cached_images(cached_images, &model); //free_struct_sample(testsample); free_struct_model(model,&sparm); return(0); }
int main(int argc, char* argv[]) { double *w; /* weight vector */ int outer_iter; long m, i; double C, epsilon; LEARN_PARM learn_parm; KERNEL_PARM kernel_parm; char trainfile[1024]; char modelfile[1024]; int MAX_ITER; /* new struct variables */ SVECTOR **fycache, *diff, *fy; EXAMPLE *ex; SAMPLE sample; STRUCT_LEARN_PARM sparm; STRUCTMODEL sm; //double decrement; double primal_obj;//, last_primal_obj; //double cooling_eps; //double stop_crit; DebugConfiguration::VerbosityLevel = VerbosityLevel::None; /* read input parameters */ my_read_input_parameters(argc, argv, trainfile, modelfile, &learn_parm, &kernel_parm, &sparm); epsilon = learn_parm.eps; C = learn_parm.svm_c; MAX_ITER = learn_parm.maxiter; /* read in examples */ sample = read_struct_examples(trainfile,&sparm); ex = sample.examples; m = sample.n; /* initialization */ init_struct_model(sample,&sm,&sparm,&learn_parm,&kernel_parm); w = sm.w; //w = create_nvector(sm.sizePsi); //clear_nvector(w, sm.sizePsi); //sm.w = w; /* establish link to w, as long as w does not change pointer */ /* some training information */ printf("C: %.8g\n", C); printf("epsilon: %.8g\n", epsilon); printf("sample.n: %ld\n", sample.n); printf("sm.sizePsi: %ld\n", sm.sizePsi); fflush(stdout); /* impute latent variable for first iteration */ init_latent_variables(&sample,&learn_parm,&sm,&sparm); /* prepare feature vector cache for correct labels with imputed latent variables */ fycache = (SVECTOR**)malloc(m*sizeof(SVECTOR*)); for (i=0; i<m; i++) { fy = psi(ex[i].x, ex[i].y, ex[i].h, &sm, &sparm); /* DEBUG */ printf("true_psi[%d]=", i); for (int j = 0; j < sm.sizePsi; ++j) printf("%.4lf ", fy->words[j].weight); printf("\n"); diff = add_list_ss(fy); free_svector(fy); fy = diff; fycache[i] = fy; } /* outer loop: latent variable imputation */ outer_iter = 1; //last_primal_obj = 0; //decrement = 0; //cooling_eps = 0.5*C*epsilon; //while ((outer_iter<=MIN_OUTER_ITER)||((!stop_crit)&&(outer_iter<MAX_OUTER_ITER))) { while (outer_iter<MAX_OUTER_ITER) 
{ LearningTracker::NextOuterIteration(); printf("OUTER ITER %d\n", outer_iter); /* cutting plane algorithm */ primal_obj = cutting_plane_algorithm(w, m, MAX_ITER, C, /*cooling_eps, */fycache, ex, &sm, &sparm); /* compute decrement in objective in this outer iteration */ /* decrement = last_primal_obj - primal_obj; last_primal_obj = primal_obj; printf("primal objective: %.4f\n", primal_obj); printf("decrement: %.4f\n", decrement); fflush(stdout); stop_crit = (decrement<C*epsilon)&&(cooling_eps<0.5*C*epsilon+1E-8); cooling_eps = -decrement*0.01; cooling_eps = MAX(cooling_eps, 0.5*C*epsilon); printf("cooling_eps: %.8g\n", cooling_eps); */ /* print new weights */ printf("W="); for (i = 1; i <= sm.sizePsi; ++i) printf("%.3f ", sm.w[i]); printf("\n"); /* Save model */ char modelfile_tmp[1024]; sprintf(modelfile_tmp, "%s.%d", modelfile, outer_iter); write_struct_model(modelfile_tmp, &sm, &sparm); /* impute latent variable using updated weight vector */ for (i=0; i<m; i++) { free_latent_var(ex[i].h); ex[i].h = infer_latent_variables(ex[i].x, ex[i].y, &sm, &sparm); } /* re-compute feature vector cache */ for (i=0; i<m; i++) { free_svector(fycache[i]); fy = psi(ex[i].x, ex[i].y, ex[i].h, &sm, &sparm); /* DEBUG */ printf("true_psi[%d]=", i); for (int j = 0; j < sm.sizePsi; ++j) printf("%.4lf ", fy->words[j].weight); printf("\n"); diff = add_list_ss(fy); free_svector(fy); fy = diff; fycache[i] = fy; } outer_iter++; } // end outer loop /* write structural model */ write_struct_model(modelfile, &sm, &sparm); // skip testing for the moment /* free memory */ free_struct_sample(sample); free_struct_model(sm, &sparm); for(i=0; i<m; i++) { free_svector(fycache[i]); } free(fycache); return(0); }
/*
 * Solve the structural-SVM subproblem of MedSTC with SVM-struct.
 *
 * Reads training examples from dataFileName, trains with the algorithm
 * selected by param->SVM_ALGTYPE (0 = n-slack, 2 = one-slack primal,
 * 3 = one-slack primal warm-started from the current eta), then extracts:
 *  - the Lagrange multipliers into m_dMu (per document x label),
 *  - the bias into m_dB and the weights into m_dEta,
 *  - the primal objective into m_dsvm_primalobj.
 *
 * NOTE(review): the `res` parameter is not used in this function body.
 */
void MedSTC::svmStructSolver(char *dataFileName, Params *param, double *res)
{
  LEARN_PARM learn_parm;
  KERNEL_PARM kernel_parm;
  STRUCT_LEARN_PARM struct_parm;
  STRUCTMODEL structmodel;
  int alg_type = 2;

  /* set the parameters. */
  set_init_param(&struct_parm, &learn_parm, &kernel_parm, &alg_type);
  struct_parm.C = m_dC;

  /* read the training examples */
  SAMPLE sample = read_struct_examples(dataFileName, &struct_parm);

  if(param->SVM_ALGTYPE == 0)
    svm_learn_struct(sample, &struct_parm, &learn_parm, &kernel_parm, &structmodel, NSLACK_ALG);
  //else if(alg_type == 1)
  //	svm_learn_struct(sample, &struct_parm, &learn_parm, &kernel_parm, &structmodel, NSLACK_SHRINK_ALG);
  else if(param->SVM_ALGTYPE == 2) {
    struct_parm.C = m_dC * sample.n; // Note: in n-slack formulation, C is not divided by N.
    svm_learn_struct_joint(sample, &struct_parm, &learn_parm, &kernel_parm, &structmodel, ONESLACK_PRIMAL_ALG);
  } else if ( param->SVM_ALGTYPE == 3 ) {
    struct_parm.C = m_dC * sample.n; // Note: in n-slack formulation, C is not divided by N.
    /* warm start: seed the solver with the current eta weights */
    int nEtaNum = m_nLabelNum * m_nK;
    for ( int i=1; i<=nEtaNum; i++ ) {
      initEta_[i] = m_dEta[i-1];  /* initEta_ is 1-based like SVM-light weight vectors */
    }
    svm_learn_struct_joint( sample, &struct_parm, &learn_parm, &kernel_parm,
                            &structmodel, ONESLACK_PRIMAL_ALG, initEta_, nEtaNum );
  }

  /* get the optimal lagrangian multipliers.
   * Note: for 1-slack formulation: the "marginalization" is
   * needed for fast computation.
   */
  int nVar = sample.n * m_nLabelNum;
  for ( int k=0; k<nVar; k++ ) m_dMu[k] = 0;

  if ( param->SVM_ALGTYPE == 0 ) {
    /* n-slack: one alpha per support vector, indexed by its document */
    for ( int k=1; k<structmodel.svm_model->sv_num; k++ ) {
      int docnum = structmodel.svm_model->supvec[k]->orgDocNum;
      m_dMu[docnum] = structmodel.svm_model->alpha[k];
    }
  } else if ( param->SVM_ALGTYPE == 2 ) {
    /* 1-slack: each support vector aggregates one label per document;
       spread alpha/n over the (document, label) pairs it encodes */
    for ( int k=1; k<structmodel.svm_model->sv_num; k++ ) {
      int *vecLabel = structmodel.svm_model->supvec[k]->lvec;
      double dval = structmodel.svm_model->alpha[k] / sample.n;
      for ( int d=0; d<sample.n; d++ ) {
        int label = vecLabel[d];
        m_dMu[d*m_nLabelNum + label] += dval;
      }
    }
  } else ;  /* other algorithm types: multipliers left at zero */

  // set the SVM parameters.
  m_dB = structmodel.svm_model->b;
  for ( int y=0; y<m_nLabelNum; y++ ) {
    for ( int k=0; k<m_nK; k++ ){
      int etaIx = y * m_nK + k;
      m_dEta[etaIx] = structmodel.w[etaIx+1];  /* structmodel.w is 1-based */
    }
  }
  m_dsvm_primalobj = structmodel.primalobj;

  // free the memory
  free_struct_sample(sample);
  free_struct_model(structmodel);
}
/*
 * MATLAB MEX entry point for SVM-struct training.
 *
 * Inputs:  in[0] (ARGS)  — option string, split into argc/argv;
 *          in[1] (SPARM) — struct with cell arrays .patterns and .labels
 *                          (same length) and, for CUSTOM kernels, a
 *                          function handle .kernelFn.
 * Output:  out[0] — the learned model encapsulated as a MATLAB array.
 *
 * BUG FIX: the labels/patterns size check previously read
 *   `! mxGetNumberOfElements(labels_array) == numExamples`
 * which, because `!` binds tighter than `==`, compared the *negated*
 * element count against numExamples and therefore never detected a
 * mismatch; it is now a plain `!=` comparison.
 */
void mexFunction (int nout, mxArray ** out,
                  int nin, mxArray const ** in)
{
  SAMPLE sample;  /* training sample */
  LEARN_PARM learn_parm;
  KERNEL_PARM kernel_parm;
  STRUCT_LEARN_PARM struct_parm;
  STRUCTMODEL structmodel;
  int alg_type;

  enum {IN_ARGS=0, IN_SPARM} ;
  enum {OUT_W=0} ;

  /* SVM-light is not fully reentrant, so we need to run this patch first */
  init_qp_solver() ;
  verbosity = 0 ;
  kernel_cache_statistic = 0 ;

  if (nin != 2) {
    mexErrMsgTxt("Two arguments required") ;
  }

  /* Parse ARGS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  char arg [1024 + 1] ;
  int argc ;
  char ** argv ;

  if (! uIsString(in[IN_ARGS], -1)) {
    mexErrMsgTxt("ARGS must be a string") ;
  }

  mxGetString(in[IN_ARGS], arg, sizeof(arg) / sizeof(char)) ;
  arg_split (arg, &argc, &argv) ;

  /* argv-1/argc+1 fake a program name in slot 0 as the parsers expect */
  svm_struct_learn_api_init(argc+1, argv-1) ;

  read_input_parameters (argc+1,argv-1,
                         &verbosity, &struct_verbosity,
                         &struct_parm, &learn_parm, &kernel_parm, &alg_type ) ;

  if (kernel_parm.kernel_type != LINEAR &&
      kernel_parm.kernel_type != CUSTOM) {
    mexErrMsgTxt ("Only LINEAR or CUSTOM kerneles are supported") ;
  }

  /* Parse SPARM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  mxArray const * sparm_array = in [IN_SPARM] ;
  mxArray const * patterns_array ;
  mxArray const * labels_array ;
  mxArray const * kernelFn_array ;
  int numExamples, ei ;

  if (! sparm_array) {
    mexErrMsgTxt("SPARM must be a structure") ;
  }
  struct_parm.mex = sparm_array ;

  patterns_array = mxGetField(sparm_array, 0, "patterns") ;
  if (! patterns_array ||
      ! mxIsCell(patterns_array)) {
    mexErrMsgTxt("SPARM.PATTERNS must be a cell array") ;
  }
  numExamples = mxGetNumberOfElements(patterns_array) ;

  labels_array = mxGetField(sparm_array, 0, "labels") ;
  /* BUG FIX: was `! mxGetNumberOfElements(...) == numExamples` (precedence) */
  if (! labels_array ||
      ! mxIsCell(labels_array) ||
      mxGetNumberOfElements(labels_array) != (mwSize)numExamples) {
    mexErrMsgTxt("SPARM.LABELS must be a cell array "
                 "with the same number of elements of "
                 "SPARM.PATTERNS") ;
  }

  /* wrap each pattern/label cell as an EXAMPLE; labels are not owned */
  sample.n = numExamples ;
  sample.examples = (EXAMPLE *) my_malloc (sizeof(EXAMPLE) * numExamples) ;
  for (ei = 0 ; ei < numExamples ; ++ ei) {
    sample.examples[ei].x.mex = mxGetCell(patterns_array, ei) ;
    sample.examples[ei].y.mex = mxGetCell(labels_array, ei) ;
    sample.examples[ei].y.isOwner = 0 ;
  }

  if (struct_verbosity >= 1) {
    mexPrintf("There are %d training examples\n", numExamples) ;
  }

  kernelFn_array = mxGetField(sparm_array, 0, "kernelFn") ;
  if (! kernelFn_array && kernel_parm.kernel_type == CUSTOM) {
    mexErrMsgTxt("SPARM.KERNELFN must be define for CUSTOM kernels") ;
  }
  if (kernelFn_array) {
    MexKernelInfo * info ;
    if (mxGetClassID(kernelFn_array) != mxFUNCTION_CLASS) {
      mexErrMsgTxt("SPARM.KERNELFN must be a valid function handle") ;
    }
    /* stash the handle where the custom-kernel callback can find it */
    info = (MexKernelInfo*) kernel_parm.custom ;
    info -> structParm = sparm_array ;
    info -> kernelFn   = kernelFn_array ;
  }

  /* Learning ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  switch (alg_type) {
  case 0:
    svm_learn_struct(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,NSLACK_ALG) ;
    break ;
  case 1:
    svm_learn_struct(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,NSLACK_SHRINK_ALG);
    break ;
  case 2:
    svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,ONESLACK_PRIMAL_ALG);
    break ;
  case 3:
    svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,ONESLACK_DUAL_ALG);
    break ;
  case 4:
    svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,ONESLACK_DUAL_CACHE_ALG);
    break ;
  case 9:
    svm_learn_struct_joint_custom(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel);
    break ;
  default:
    mexErrMsgTxt("Unknown algorithm type") ;
  }

  /* Write output ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /* Warning: The model contains references to the original data 'docs'.
     If you want to free the original data, and only keep the model, you
     have to make a deep copy of 'model'. */
  mxArray * model_array = newMxArrayEncapsulatingSmodel (&structmodel) ;
  out[OUT_W] = mxDuplicateArray (model_array) ;
  destroyMxArrayEncapsulatingSmodel (model_array) ;

  free_struct_sample (sample) ;
  free_struct_model (structmodel) ;

  svm_struct_learn_api_exit () ;
  free_qp_solver () ;
}
/*
 * Library-style SVM-struct training entry point.
 *
 * Parses the command-line style options in argv, trains on the caller's
 * *sample with the selected algorithm, and copies the learned model into
 * *model (sizePsi, a deep-copied svm_model, and the linear weights).
 * The internal STRUCTMODEL is released before returning; exits the
 * process with status 1 on an unknown algorithm type.
 */
int svm_struct_learn (int argc, char* argv[], SAMPLE *sample,
                      STRUCTMODEL *model, STRUCT_LEARN_PARM *struct_parm)
{
  char trainfile[200];   /* file with training examples */
  char modelfile[200];   /* file for resulting classifier */
  LEARN_PARM learn_parm;
  KERNEL_PARM kernel_parm;
  STRUCTMODEL structmodel;
  int alg_type;
  HIDEO_ENV *hideo_env = create_env();

  svm_struct_learn_api_init(argc, argv);
  svm_struct_read_input_parameters(argc, argv, trainfile, modelfile,
                                   &verbosity, &struct_verbosity,
                                   struct_parm, &learn_parm,
                                   &kernel_parm, &alg_type);

  /* Do the learning and return structmodel. */
  switch (alg_type) {
  case 1:
    svm_learn_struct(*sample, struct_parm, &learn_parm, &kernel_parm,
                     &structmodel, hideo_env);
    break;
  case 2:
    svm_learn_struct_joint(*sample, struct_parm, &learn_parm, &kernel_parm,
                           &structmodel, PRIMAL_ALG, hideo_env);
    break;
  case 3:
    svm_learn_struct_joint(*sample, struct_parm, &learn_parm, &kernel_parm,
                           &structmodel, DUAL_ALG, hideo_env);
    break;
  case 4:
    svm_learn_struct_joint(*sample, struct_parm, &learn_parm, &kernel_parm,
                           &structmodel, DUAL_CACHE_ALG, hideo_env);
    break;
  default:
    exit(1);
  }

  /* Warning: The model contains references to the original data 'docs'.
     If you want to free the original data, and only keep the model, you
     have to make a deep copy of 'model'.  Hence copy_model() below. */
  model->sizePsi = structmodel.sizePsi;
  model->svm_model = copy_model(structmodel.svm_model);
  model->w = model->svm_model->lin_weights;

  free_struct_model(structmodel);
  svm_struct_learn_api_exit();
  return 0;
}