int train_fs(const char* input_file_name, const char* model_file_name)
{
    // Initialization
    const char* error_msg;
    set_default_params();
    read_problem(input_file_name);
    error_msg = check_parameter(&prob, &param);
    if(error_msg){
        fprintf(stderr,"Error: %s\n",error_msg);
        return -1;
    }

    // Do the cross-validation and save accuracy
    double accuracy = do_cross_validation(nr_fold);
    std::string info_fpath = std::string(model_file_name) + ".info";
    FILE* info = fopen(info_fpath.c_str(), "w");
    fprintf(info, "Accuracy : %f", accuracy);
    //fflush(info);
    fclose(info);

    // Train a model on the whole dataset
    model_train = train(&prob, &param);
    if(save_model(model_file_name, model_train)){
        fprintf(stderr,"can't save model to file %s\n",model_file_name);
        return -1;
    }

    // Free resources
    destroy_param(&param);
    free(prob.y);
    free(prob.x);
    free(x_space);
    free(line);
    return 0;
}
int main(int argc, char **argv)
{
    char input_file_name[1024];
    char model_file_name[1024];
    const char *error_msg;

    parse_command_line(argc, argv, input_file_name, model_file_name);
    read_problem(input_file_name);
    error_msg = check_parameter(&prob, &param);

    if(error_msg)
    {
        fprintf(stderr,"Error: %s\n",error_msg);
        exit(1);
    }

    if(flag_cross_validation)
    {
        do_cross_validation();
    }
    else
    {
        model_ = train(&prob, &param);
        save_model(model_file_name, model_);
        destroy_model(model_);
    }
    destroy_param(&param);
    free(prob.y);
    free(prob.x);
    free(x_space);
    free(line);

    return 0;
}
// run the whole learning process of RBM
void rbm::run(std::string model_weight_file, std::string model_visible_bias_file,
              std::string model_hidden_bias_file, std::string learned_features_file, std::string file)
{
    load_data(file.c_str());

    // randomly initialize weights and biases
    float ** weights = new float*[num_hidden_units];
    float * hidden_bias = new float[num_hidden_units];
    float * visible_bias = new float[num_visible_units];
    for (int i = 0; i < num_hidden_units; i++){
        weights[i] = new float[num_visible_units];
    }
    init_paras(weights, visible_bias, hidden_bias);
    // std::string in_weights = "init_weights";
    // std::string in_hidden = "init_hidden";
    // std::string in_visible = "init_visible";
    // init_paras(weights, visible_bias, hidden_bias, in_weights.c_str(), in_hidden.c_str(), in_visible.c_str());

    // train rbm
    train(weights, visible_bias, hidden_bias);

    // save model
    save_model(weights, visible_bias, hidden_bias, model_weight_file.c_str(),
               model_visible_bias_file.c_str(), model_hidden_bias_file.c_str());

    for (int i = 0; i < num_hidden_units; i++){
        delete[] weights[i];
    }
    delete[] weights;
    delete[] visible_bias;
    delete[] hidden_bias;
}
int MainCanvas::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
    _id = QMainWindow::qt_metacall(_c, _id, _a);
    if (_id < 0)
        return _id;
    if (_c == QMetaObject::InvokeMetaMethod) {
        switch (_id) {
        case 0: tinChanged(); break;
        case 1: hfpChanged(); break;
        case 2: numclustersChanged((*reinterpret_cast< int(*)>(_a[1]))); break;
        case 3: open(); break;
        case 4: save_model(); break;
        case 5: save_scene(); break;
        case 6: closeMesh(); break;
        case 7: properties(); break;
        case 8: runhfp(); break;
        case 9: shuffleColors(); break;
        case 10: increaseNumClusters(); break;
        case 11: decreaseNumClusters(); break;
        case 12: showAllClusters(); break;
        case 13: showOneCluster(); break;
        case 14: about(); break;
        case 15: setSceneGraph(); break;
        case 16: checkNoTIN(); break;
        case 17: checkHFPStatus(); break;
        case 18: updateMatIndexes(); break;
        case 19: setNumClusters((*reinterpret_cast< int(*)>(_a[1]))); break;
        case 20: setNumClusters(); break;
        default: ;
        }
        _id -= 21;
    }
    return _id;
}
void doRequires(zoneInfo_t* info, ZStream* buf, WeaponVariantDef* def)
{
#define save_material(mat) if(mat) mat = (Material*)requireAsset(info, ASSET_TYPE_MATERIAL, (char*)mat->name, buf);
    save_material(def->killIcon);
    save_material(def->dpadIcon);
    save_material(def->WeaponDef->reticleCenter);
    save_material(def->WeaponDef->reticleSide);
    save_material(def->WeaponDef->hudIcon);
    save_material(def->WeaponDef->pickupIcon);
    save_material(def->WeaponDef->ammoCounterIcon);
    save_material(def->WeaponDef->AdsOverlayShader);
    save_material(def->WeaponDef->AdsOverlayShaderLowRes);
    save_material(def->WeaponDef->AdsOverlayShaderEMP);
    save_material(def->WeaponDef->AdsOverlayShaderEMPLowRes);
#undef save_material

    // xmodel
#define save_model(mat) if(mat) mat = (XModel*)requireAsset(info, ASSET_TYPE_XMODEL, (char*)mat->name, buf);
    for (int i = 0; i < 16; i++) { save_model(def->WeaponDef->gunXModel[i]); }
    save_model(def->WeaponDef->handXModel);
    for (int i = 0; i < 16; i++) { save_model(def->WeaponDef->worldModel[i]); }
    save_model(def->WeaponDef->worldClipModel);
    save_model(def->WeaponDef->rocketModel);
    save_model(def->WeaponDef->knifeModel);
    save_model(def->WeaponDef->worldKnifeModel);
    save_model(def->WeaponDef->projectileModel);
#undef save_model

    if (def->WeaponDef->collisions)
        def->WeaponDef->collisions = (PhysGeomList*)requireAsset(info, ASSET_TYPE_PHYS_COLLMAP, (char*)def->WeaponDef->collisions->name, buf);
    if (def->WeaponDef->tracer)
        def->WeaponDef->tracer = (Tracer*)requireAsset(info, ASSET_TYPE_TRACER, (char*)def->WeaponDef->tracer->name, buf);

    /*
#define save_fx(fx) if(fx) fx = (FxEffectDef*)requireAsset(info, ASSET_TYPE_FX, (char*)fx->name, buf);
    save_fx(def->WeaponDef->viewFlashEffect);
    save_fx(def->WeaponDef->worldFlashEffect);
    save_fx(def->WeaponDef->viewShellEjectEffect);
    save_fx(def->WeaponDef->worldShellEjectEffect);
    save_fx(def->WeaponDef->viewLastShotEjectEffect);
    save_fx(def->WeaponDef->worldLastShotEjectEffect);
    save_fx(def->WeaponDef->projExplosionEffect);
    save_fx(def->WeaponDef->projDudEffect);
    save_fx(def->WeaponDef->projTrailEffect);
    save_fx(def->WeaponDef->projBeaconEffect);
    save_fx(def->WeaponDef->projIgnitionEffect);
    save_fx(def->WeaponDef->turretOverheatEffect);
#undef save_fx
    */
}
int main(int argc, char* argv[])
{
    Exampler train;
    int i, mode = 0;
    double C = 1, verb = 10, tau = 0.0001;

    // parse options
    for(i = 1; i < argc; i++)
    {
        if(argv[i][0] != '-')
            break;
        ++i;
        switch(argv[i-1][1])
        {
        case 'c': C = atof(argv[i]); break;
        case 't': tau = atof(argv[i]); break;
        case 'm': mode = atoi(argv[i]); break;
        case 'v': verb = atof(argv[i]); break;
        default: fprintf(stderr,"unknown option\n");
        }
    }

    // determine filenames
    if(i >= (argc - 1))
        exit_with_help();

    std::cout << "Loading Train Data " << std::endl;
    train.libsvm_load_data(argv[i], false);
    char* save_file = argv[i+1];

    // CREATE
    int step = (int) ((double) train.nb_ex / (100 / verb));
    std::cout << "\n--> Building with C = " << C << std::endl;
    if (mode)
        std::cout << " BATCH Learning" << std::endl;
    else
        std::cout << " ONLINE Learning" << std::endl;
    Machine* svm = create_larank();
    svm->C = C;
    svm->tau = tau;
    training(svm, train, step, mode);
    save_model(svm, save_file);
    delete svm;
    return 0;
}
/**
@todo JSON access errors lack specifier
@todo Check that omegas are filled/exist
@todo Clean up nested checks for optional JSON arguments
@todo Eigenvalues should be computed independent of json output, stored and used twice, for screen and file
*/
void run(const Settings &s)
{
    // empty configurators
    unique_ptr<csmp::tperm::Configurator> mconf = make_unique<csmp::tperm::NullConfigurator>();
    unique_ptr<csmp::tperm::Configurator> fconf = make_unique<csmp::tperm::NullConfigurator>();
    // get matrix configurator
    Settings cs(s, "configuration");
    if (cs.json.count("matrix")) {
        Settings mcs(cs, "matrix");
        mconf = MatrixConfiguratorFactory().configurator(mcs);
    }
    // get fracture configurator
    if (cs.json.count("fractures")) {
        Settings fcs(cs, "fractures");
        fconf = FractureConfiguratorFactory().configurator(fcs);
    }
    // get omega generator
    Settings acs(s, "analysis");
    auto ogen = make_omega_generator(acs);
    // load model...
    Settings ms(s, "model");
    auto model = load_model(ms);
    // configure material properties
    mconf->configure(*model);
    fconf->configure(*model);
    // sort boundaries
    auto bds = sort_boundaries(*model, s);
    // ready to solve
    solve(bds, *model);
    // generate omegas
    auto omegas = ogen->generate(*model);
    auto nomegas = named_omegas(omegas);
    // get upscaled tensors
    auto omega_tensors = fetch(*model, nomegas);
    // results
    string jres_fname = "";
    if (s.json.count("output")) {
        Settings outs(s, "output");
        if (outs.json.count("save final binary")) // write to csmp binary
            if (outs.json["save final binary"].get<string>() != "")
                save_model(*model, outs.json["save final binary"].get<string>().c_str());
        if (outs.json.count("vtu")) { // write to vtu
            if (outs.json["vtu"].get<bool>()) {
                make_omega_regions(nomegas, *model);
                vtu(omega_tensors, *model);
            }
            if (outs.json.count("vtu regions"))
                vtu(outs.json["vtu regions"].get<vector<string>>(), *model);
        }
        if (outs.json.count("results file name")) // write to json
            jres_fname = s.json["output"]["results file name"].get<string>();
    }
    report(omega_tensors, *model, jres_fname.c_str());
}
int main(int argc, char **argv)
{
    char input_file_name[1024];
    char model_file_name[1024];
    const char *error_msg;

    parse_command_line(argc, argv, input_file_name, model_file_name);
    read_problem(input_file_name);
    error_msg = check_parameter(&prob, &param);

    if(error_msg)
    {
        fprintf(stderr,"ERROR: %s\n",error_msg);
        exit(1);
    }

    if(flag_find_C && flag_warm_start)
    {
        fprintf(stderr,"ERROR: Option -C and -i can't both exist\n");
        exit(1);
    }

    if (flag_find_C)
    {
        do_find_parameter_C();
    }
    else if(flag_cross_validation)
    {
        do_cross_validation();
    }
    else
    {
        if(flag_warm_start)
        {
            if(prob.n != initial_model->nr_feature)
                fprintf(stderr,"WARNING: The number of features in the input file does not match that in the initial model\n");
            model_ = warm_start_train(&prob, &param, initial_model);
            free_and_destroy_model(&initial_model);
        }
        else
            model_ = train(&prob, &param);

        if(save_model(model_file_name, model_))
        {
            fprintf(stderr,"can't save model to file %s\n",model_file_name);
            exit(1);
        }
        free_and_destroy_model(&model_);
    }
    destroy_param(&param);
    free(prob.y);
    free(prob.x);
    free(x_space);
    free(line);
    return 0;
}
int model::train()
{
    if (specific_init())
        return 1;

    std::chrono::high_resolution_clock::time_point ts, tn;
    std::cout << "Sampling " << n_iters << " iterations!" << std::endl;
    for (int iter = 1; iter <= n_iters; ++iter)
    {
        std::cout << "Iteration " << iter << " ..." << std::endl;
        ts = std::chrono::high_resolution_clock::now();

        // for each document
        for (int m = 0; m < M; ++m)
            sampling(m);

        tn = std::chrono::high_resolution_clock::now();
        time_ellapsed.push_back(std::chrono::duration_cast<std::chrono::milliseconds>(tn - ts).count());

#if COMP_LLH
        test();
#endif

        if (n_save > 0)
        {
            if (iter % n_save == 0)
            {
                // saving the model
                std::cout << "Saving the model at iteration " << iter << "..." << std::endl;
                save_model(iter);
            }
        }
    }

    std::cout << "Gibbs sampling completed!" << std::endl;
    std::cout << "Saving the final model!" << std::endl;
    save_model(-1);

    return 0;
}
TEST_P(classifier_test, save_load_2) {
    std::string save_empty, save_test;

    // Test data
    datum pos;
    pos.num_values_.push_back(make_pair("value", 10.0));
    datum neg;
    neg.num_values_.push_back(make_pair("value", -10.0));

    // Save empty state
    save_model(classifier_->get_mixable_holder(), save_empty);

    // Train
    vector<pair<string, datum> > data;
    classifier_->train(make_pair("pos", pos));
    classifier_->train(make_pair("neg", neg));

    // Now, the classifier can classify properly
    ASSERT_EQ("pos", get_max_label(classifier_->classify(pos)));
    ASSERT_EQ("neg", get_max_label(classifier_->classify(neg)));

    // Save current state
    save_model(classifier_->get_mixable_holder(), save_test);

    // Load empty
    load_model(classifier_->get_mixable_holder(), save_empty);

    // Now the classifier classifies the data improperly, but we cannot expect specific results
    string pos_max = get_max_label(classifier_->classify(pos));
    string neg_max = get_max_label(classifier_->classify(neg));
    ASSERT_EQ(0, pos_max.compare(neg_max));

    // Reload server
    load_model(classifier_->get_mixable_holder(), save_test);

    // The classifier works well
    ASSERT_EQ("pos", get_max_label(classifier_->classify(pos)));
    ASSERT_EQ("neg", get_max_label(classifier_->classify(neg)));
}
/* print dongdu.model and dongdu.map */
void Machine::print() {
    string modelfile = PATH + "dongdu.model";
    cout << "Save model file : " << modelfile << endl;
    save_model(modelfile.c_str(), _model);

    // write map file from strmap
    string mapfile = PATH + "dongdu.map";
    cout << "Save map file : " << mapfile << endl;
    strmap.print(mapfile);
    return;
}
void LVlinear_save_model(lvError *lvErr, const char *path_in, const LVlinear_model *model_in){
    try{
        errno = 0;

        // Convert LVsvm_model to svm_model
        auto mdl = std::make_unique<model>();
        LVConvertModel(*model_in, *mdl);

        int err = save_model(path_in, mdl.get());

        if (err == -1){
            // Allocate room for output error message (truncated if buffer is too small)
            const size_t bufSz = 256;
            char buf[bufSz] = "";
            std::string errstr;
            // strerror_s and the XSI strerror_r return 0 on success
#if defined(_WIN32) || defined(_WIN64)
            if (strerror_s(buf, bufSz, errno) == 0)
                errstr = buf;
            else
                errstr = "Unknown error";
#elif (_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && ! _GNU_SOURCE
            if (strerror_r(errno, buf, bufSz) == 0)
                errstr = buf;
            else
                errstr = "Unknown error";
#else
            char* gnuerr = strerror_r(errno, buf, bufSz);
            if (gnuerr != nullptr)
                errstr = gnuerr;
            else
                errstr = "Unknown error";
#endif
            errno = 0;
            throw LVException(__FILE__, __LINE__, "Model save operation failed (" + errstr + ").");
        }
    }
    catch (LVException &ex) {
        ex.returnError(lvErr);
    }
    catch (std::exception &ex) {
        LVException::returnStdException(lvErr, __FILE__, __LINE__, ex);
    }
    catch (...) {
        LVException ex(__FILE__, __LINE__, "Unknown exception has occurred");
        ex.returnError(lvErr);
    }
}
// run the whole learning process of DNN
void dnn::run(std::string model_weight_file, std::string model_bias_file) {
    // assign id to threads
    if (!thread_id.get()) {
        thread_id.reset(new int(thread_counter++));
    }

    // get access to tables
    petuum::PSTableGroup::RegisterThread();
    mat *weights = new mat[num_layers-1];
    mat *biases = new mat[num_layers-1];
    for(int i = 0; i < num_layers-1; i++){
        weights[i] = petuum::PSTableGroup::GetTableOrDie<float>(i);
        biases[i] = petuum::PSTableGroup::GetTableOrDie<float>(i+num_layers-1);
    }

    // Run additional iterations to let stale values finish propagating
    for (int iter = 0; iter < staleness; ++iter) {
        petuum::PSTableGroup::Clock();
    }

    // initialize parameters
    if (client_id == 0 && (*thread_id) == 0){
        std::cout << "init parameters" << std::endl;
        init_paras(weights, biases);
        std::cout << "init parameters done" << std::endl;
    }
    process_barrier->wait();

    // do DNN training
    if (client_id == 0 && (*thread_id) == 0)
        std::cout << "training starts" << std::endl;
    train(weights, biases);

    // Run additional iterations to let stale values finish propagating
    for (int iter = 0; iter < staleness; ++iter) {
        petuum::PSTableGroup::Clock();
    }

    // save model
    if(client_id == 0 && (*thread_id) == 0) {
        save_model(weights, biases, model_weight_file.c_str(), model_bias_file.c_str());
    }

    delete[] weights;
    delete[] biases;
    petuum::PSTableGroup::DeregisterThread();
}
int main(int argc, char **argv)
{
    char input_file_name[1024];
    char model_file_name[1024];
    const char *error_msg;

    parse_command_line(argc, argv, input_file_name, model_file_name);
    read_problem(input_file_name);
    error_msg = check_parameter(&prob, &param);

    if(error_msg)
    {
        fprintf(stderr,"Error: %s\n",error_msg);
        exit(1);
    }

    if(flag_cross_validation)
    {
        if (nr_fold <= 10)
        {
            do_cross_validation();
        }
        else
        {
            double cv;
            nr_fold = nr_fold - 10;
            cv = binary_class_cross_validation(&prob, &param, nr_fold);
            printf("Cross Validation = %g%%\n", 100.0*cv);
        }
    }
    else
    {
        model_ = train(&prob, &param);
        if(save_model(model_file_name, model_))
        {
            fprintf(stderr,"can't save model to file %s\n",model_file_name);
            exit(1);
        }
        free_and_destroy_model(&model_);
    }
    destroy_param(&param);
    free(prob.y);
    free(prob.x);
    free(prob.W);
    free(x_space);
    free(line);

    return 0;
}
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
    struct model *linearmodel = (struct model*)malloc( 1*sizeof(struct model) );
    //struct model *linearmodel;
    char *pFileName;
    const char *pErrorMsg;
    int status;

    if( nrhs != 2 ){
        mexPrintf("mat2liblinear(model, 'output_name');\n");
        plhs[0] = mxCreateDoubleMatrix(0, 0, mxREAL);
        return;
    }
    if( !mxIsStruct(prhs[0]) ){
        mexPrintf("model is not structure array\n");
        plhs[0] = mxCreateDoubleMatrix(0, 0, mxREAL);
        return;
    }
    if( !mxIsChar(prhs[1]) || mxGetM(prhs[1]) != 1 ){
        mexPrintf("FileName is not char\n");
        plhs[0] = mxCreateDoubleMatrix(0, 0, mxREAL);
        return;
    }

    //convert matlab structure to c structure
    //matlab_matrix_to_model returns an error message on failure, NULL on success
    pErrorMsg = matlab_matrix_to_model(linearmodel, prhs[0]);
    if( pErrorMsg != NULL ){
        mexPrintf("Can't read model: %s\n", pErrorMsg);
        plhs[0] = mxCreateDoubleMatrix(0, 0, mxREAL);
        return;
    }

    //save model
    pFileName = mxArrayToString(prhs[1]);
    status = save_model(pFileName, linearmodel);
    if( status != 0 ){
        mexWarnMsgTxt("Error occurred while writing to file");
    }

    free_and_destroy_model(&linearmodel);
    mxFree(pFileName);

    //return 0 or 1. 0:success, 1:failure
    plhs[0] = mxCreateDoubleScalar(status);
    return;
}
void MedSTC::learn_svm(char *model_dir, const double &dC, const double &dEll)
{
    char model_root[512];
    sprintf(model_root, "%s/final", model_dir);
    load_model( model_root );
    m_dC = dC;
    m_dDeltaEll = dEll;

    Params *param = new Params();
    param->DELTA_ELL = m_dDeltaEll;
    param->LAMBDA = m_dLambda;
    param->RHO = m_dRho;
    param->INITIAL_C = m_dC;
    param->NLABELS = m_nLabelNum;
    param->NTOPICS = m_nK;
    param->SVM_ALGTYPE = 2;

    char filename[512];
    get_train_filename( filename, model_dir, param );
    svmStructSolver( filename, param, m_dMu );

    // for testing.
    int nDataNum = 0;
    double dAcc = 0;
    get_test_filename( filename, model_dir, param );
    readLowDimData( filename, nDataNum );
    for ( int d=0; d<nDataNum; d++ ) {
        int predLabel = predict( theta_[d] );
        if ( label_[d] == predLabel ) dAcc ++;
    }
    dAcc /= nDataNum;

    FILE *fileptr = fopen("overall-res.txt", "a");
    fprintf(fileptr, "setup (K: %d; C: %.3f; fold: %d; ell: %.2f; lambda: %.2f; rho: %.4f; svm_alg: %d; maxIt: %d): accuracy %.3f; avgNonZeroWrdCode: %.5f\n",
        m_nK, m_dC, 0, dEll, m_dLambda, m_dRho, param->SVM_ALGTYPE, 0, dAcc, 0.0);
    fclose(fileptr);

    save_model( model_root, -1 );

    for ( int d=0; d<nDataNum; d++ ) {
        free( theta_[d] );
    }
    free( theta_ );
    free( label_ );
}
int main(int argc, char **argv)
{
    char input_file_name[1024];
    char model_file_name[1024];
    const char *error_msg;

    parse_command_line(argc, argv, input_file_name, model_file_name);
    read_problem(input_file_name);
    param.train_file = Malloc(char,1024);
    strcpy(param.train_file, input_file_name);

    error_msg = check_parameter(&prob, &param);

    if(error_msg)
    {
        fprintf(stderr,"ERROR: %s\n",error_msg);
        exit(1);
    }

    if(flag_cross_validation)
    {
        do_cross_validation();
    }
    else
    {
        clock_t start_cpu, end_cpu;
        double cpu_time_used;
        start_cpu = clock();

        model_ = train(&prob, &param);

        end_cpu = clock();
        cpu_time_used = ((double) (end_cpu - start_cpu)) / CLOCKS_PER_SEC;

        if(save_model(model_file_name, model_))
        {
            fprintf(stderr,"can't save model to file %s\n",model_file_name);
            exit(1);
        }
        free_and_destroy_model(&model_);
    }
    destroy_param(&param);
    free(prob.y);
    free(prob.x);
    free(x_space);
    free(line);

    return 0;
}
TEST_P(classifier_test, save_load) {
    jubatus::util::math::random::mtrand rand(0);
    const size_t example_size = 1000;

    vector<pair<string, datum> > data;
    make_random_data(rand, data, example_size);
    for (size_t i = 0; i < example_size; i++) {
        classifier_->train(data[i]);
    }

    std::string save_data;
    save_model(classifier_->get_mixable_holder(), save_data);
    classifier_->clear();
    load_model(classifier_->get_mixable_holder(), save_data);

    my_test();
}
vector<Scores> eval_model(int argc, char**argv, const Model_Builder &model_builder)
{
    // train the model
    shared_ptr<Model> model = train_model(model_builder);

    // save the model.
    if(g_params.option_is_set("WRITE_MODEL"))
    {
        FileStorage save_model(params::out_dir() + "/model.yml", FileStorage::WRITE);
        save_model << "model";
        bool model_wrote = model->write(save_model);
        if(!model_wrote)
            log_file << "warning: failed to write model!" << endl;
        save_model.release();
    }

    // show the model
    log_im("final_model", model->show("final model"));

    //
    // testing phase
    //
    // test on video
    if(g_params.has_key("TEST_VIDEO"))
        test_model_oni_video(*model);

    // compute training error
    //auto result = eval_on_dirs(model,
    //    vector<string>{params::synthetic_directory()});

    // test on images
    if(g_params.has_key("TEST_IMAGES"))
        test_model_images(model);

    // test on directories
    //auto result = eval_on_dirs(model, testDirs = default_test_dirs());
    //return result;
    return vector<Scores>();
}
/* ----------------------------------------
 * brief : report function for lr
 * x0    : the last theta result
 * x1    : the current theta result
 * _ds   : the lr model struct
 * ---------------------------------------- */
int lr_repo(double *x0, double *x1, void *_ds) {
    REG * lr = (REG *)_ds;
    double val1 = lr_eval(x0, _ds);
    double val2 = lr_eval(x1, _ds);
    if (fabs(val2 - val1) <= lr->p.ftoler){
        fprintf(stderr, "conv done exit\n");
        return 1;
    }
    int i = ++lr->p.iterno;
    fprintf(stderr, "iter: %4d, loss: %.10f", i, val2);
    if (i % lr->p.savestep == 0){
        double auc = lr_auc(x1, lr->train_ds);
        fprintf(stderr, ",train_auc: %.10f", auc);
        if (lr->test_ds){
            auc = lr_auc(x1, lr->test_ds);
            fprintf(stderr, ",test_auc: %.10f", auc);
        }
        memmove(lr->x, x1, sizeof(double) * lr->c);
        save_model(lr, i);
    }
    fprintf(stderr, "\n");
    return 0;
}
bool XLMachineLearningLibliear::SaveModel(const std::string& filename)
{
    save_model(filename.c_str(), this->Model);
    // save_model(filename.c_str(),this->Model);
    return true;
}
/*
 * learn dictionary and find optimum code.
 */
int MedSTC::train(char* start, char* directory, Corpus* pC, Params *param)
{
    m_dDeltaEll = param->DELTA_ELL;
    m_dLambda = param->LAMBDA;
    m_dRho = param->RHO;
    m_dGamma = m_dLambda;
    long runtime_start = get_runtime();

    // allocate variational parameters
    double ***phi = (double***)malloc(sizeof(double**) * pC->num_docs);
    for ( int d=0; d<pC->num_docs; d++ ) {
        phi[d] = (double**)malloc(sizeof(double*)*pC->docs[d].length);
        for (int n=0; n<pC->docs[d].length; n++) {
            phi[d][n] = (double*)malloc(sizeof(double) * param->NTOPICS);
        }
    }
    double **theta = (double**)malloc(sizeof(double*)*(pC->num_docs));
    for (int d=0; d<pC->num_docs; d++) {
        theta[d] = (double*)malloc(sizeof(double) * param->NTOPICS);
    }
    for ( int d=0; d<pC->num_docs; d++ ) {
        init_phi(&(pC->docs[d]), phi[d], theta[d], param);
    }

    // initialize model
    if (strcmp(start, "random")==0) {
        new_model(pC->num_docs, pC->num_terms, param->NTOPICS, param->NLABELS, param->INITIAL_C);
        init_param( pC );
    } else {
        load_model(start);
        m_dC = param->INITIAL_C;
    }
    strcpy(m_directory, directory);
    char filename[100];

    // run expectation maximization
    sprintf(filename, "%s/lhood.dat", directory);
    FILE* lhood_file = fopen(filename, "w");

    Document *pDoc = NULL;
    double dobj, obj_old = 1, converged = 1;
    int nIt = 0;
    while (((converged < 0) || (converged > param->EM_CONVERGED) || (nIt <= 2))
        && (nIt <= param->EM_MAX_ITER))
    {
        dobj = 0;
        double dLogLoss = 0;
        for ( int d=0; d<pC->num_docs; d++ ) {
            pDoc = &(pC->docs[d]);
            dobj += sparse_coding( pDoc, d, param, theta[d], phi[d] );
            dLogLoss += m_dLogLoss;
        }

        // m-step
        dict_learn(pC, theta, phi, param, false);

        if ( param->SUPERVISED == 1 ) { // for supervised MedLDA.
            char buff[512];
            get_train_filename( buff, m_directory, param );
            outputLowDimData( buff, pC, theta );
            svmStructSolver(buff, param, m_dMu);

            if ( param->PRIMALSVM == 1 ) { // solve svm in the primal form
                for ( int d=0; d<pC->num_docs; d++ ) {
                    loss_aug_predict( &(pC->docs[d]), theta[d] );
                }
            }
            dobj += m_dsvm_primalobj;
        } else ;

        // check for convergence
        converged = fabs(1 - dobj / obj_old);
        obj_old = dobj;

        // output model and lhood
        if ( param->SUPERVISED == 1 ) {
            fprintf(lhood_file, "%10.10f\t%10.10f\t%5.5e\t%.5f\n", dobj-m_dsvm_primalobj, dobj, converged, dLogLoss);
        } else {
            fprintf(lhood_file, "%10.10f\t%5.5e\t%.5f\n", dobj, converged, dLogLoss);
        }
        fflush(lhood_file);

        if ( nIt > 0 && (nIt % LAG) == 0) {
            sprintf( filename, "%s/%d", directory, nIt + 1);
            save_model( filename, -1 );

            sprintf( filename, "%s/%d.theta", directory, nIt + 1 );
            save_theta( filename, theta, pC->num_docs, m_nK );
        }
        nIt ++;
    }

    // learn the final SVM.
    if ( param->SUPERVISED == 0 ) {
        char buff[512];
        get_train_filename(buff, m_directory, param);
        outputLowDimData(buff, pC, theta);
        svmStructSolver(buff, param, m_dMu);
    }
    long runtime_end = get_runtime();
    double dTrainTime = ((double)runtime_end - (double)runtime_start) / 100.0;

    // output the final model
    sprintf( filename, "%s/final", directory);
    save_model( filename, dTrainTime );

    // output the word assignments (for visualization)
    int nNum = 0, nAcc = 0;
    sprintf(filename, "%s/word-assignments.dat", directory);
    FILE* w_asgn_file = fopen(filename, "w");
    for (int d=0; d<pC->num_docs; d++) {
        sparse_coding( &(pC->docs[d]), d, param, theta[d], phi[d] );
        write_word_assignment(w_asgn_file, &(pC->docs[d]), phi[d]);

        nNum ++;
        pC->docs[d].predlabel = predict(theta[d]);
        if ( pC->docs[d].gndlabel == pC->docs[d].predlabel ) nAcc ++;
    }
    fclose(w_asgn_file);
    fclose(lhood_file);

    sprintf(filename, "%s/train.theta", directory);
    save_theta(filename, theta, pC->num_docs, m_nK);

    for (int d=0; d<pC->num_docs; d++) {
        free( theta[d] );
        for (int n=0; n<pC->docs[d].length; n++)
            free( phi[d][n] );
        free( phi[d] );
    }
    free( theta );
    free( phi );

    return nIt;
}
int main(int argc, char **argv)
{
    char input_file_name[1024];
    char model_file_name[1024];
    const char *error_msg;

    /*
     * Some bookkeeping variables for MPI. The 'rank' of a process is its numeric id
     * in the process pool. For example, if we run a program via `mpirun -np 4 foo', then
     * the process ranks are 0 through 3. Here, N and size are the total number of processes
     * running (in this example, 4).
     */
    start_t = time(NULL);
    MPI_Init(&argc, &argv);                      // Initialize the MPI execution environment
    MPI_Comm_rank(MPI_COMM_WORLD, &param.rank);  // Determine current running process
    MPI_Comm_size(MPI_COMM_WORLD, &param.size);  // Total number of processes
    //double N = (double) size;                  // Number of subsystems/slaves for ADMM

    if (param.rank == param.root)
        printf ("Number of subsystems: %d \n", param.size);

    parse_command_line(argc, argv, input_file_name, model_file_name);

    // Read the meta data
    bprob.read_metadata(input_file_name);
    bprob.set_bias(bias);

    error_msg = block_check_parameter(&bprob, &param);
    if(error_msg)
    {
        fprintf(stderr,"Error: %s\n",error_msg);
        exit(1);
    }

    if (param.rank == param.root)
    {
        if (param.solver_type == L2R_L2LOSS_SVC)
            printf("ADMM + Primal trust region Newton's method for L2 loss SVM:\n");
        else if (param.solver_type == L2R_L2LOSS_SVC_DUAL)
            printf("ADMM + Dual coordinate descent for L2 loss SVM: \n");
        else if (param.solver_type == L2R_L1LOSS_SVC_DUAL)
            printf("ADMM + Dual coordinate descent for L1 loss SVM:\n");
        else
            printf("Not supported. \n");
    }

    srand(1);

    // Now read the local data
    problem * prob = read_problem(&bprob, &param);

    if(flag_cross_validation)
        do_cross_validation(prob);
    else
    {
        model_ = block_train(prob, &param);
        save_model(model_file_name, model_);
        free_and_destroy_model(&model_);
    }
    destroy_param(&param);
    MPI_Finalize();
    return 0;
}
void SVMLinear::saveModel(string pathFile)
{
    save_model(pathFile.c_str(), modelLinearSVM);
}
//---------------------------- global variables -------------------------------
int main(int argc, char **argv)
{
    char input_file_name[1024];
    char model_file_name[1024];
    const char *error_msg;
#ifdef FIGURE56
    char test_file_name[1024];
    parse_command_line(argc, argv, input_file_name, test_file_name);
#else
    parse_command_line(argc, argv, input_file_name, model_file_name); // initialize global struct param, according to command line
    //_parse_command_line(argc, argv, input_file_name, model_file_name); // initialize global struct param, according to command line
#endif
    read_problem(input_file_name); // get all possible information about the train file into global struct prob
#ifdef FIGURE56
    read_problem_test(test_file_name);
#endif
    error_msg = check_parameter(&prob, &param);

    if(error_msg)
    {
        fprintf(stderr,"ERROR: %s\n",error_msg);
        exit(1);
    }

    // struct model
    // {
    //     struct parameter param;
    //     int nr_class;    /* number of classes */
    //     int nr_feature;
    //     double *w;
    //     int *label;      /* label of each class */
    // };
    // model_=train(&prob, &param);

    //-------- apply memory for V matrix --------------
    int i = 0;
    double * p = Malloc(double, param.col_size * prob.l);
    //srand( (unsigned)time( NULL ) );  // seed the random number generator
    for (i = 0; i < param.col_size * prob.l; i++)
    {
        p[i] = rand() / (RAND_MAX + 1.0);  // generate a random number in [0, 1)
        //p[i] = rand();
    }
    double ** v_pp = Malloc(double*, prob.l);
    param.v_pp = v_pp;
    for (i = 0; i < prob.l; i++)
        param.v_pp[i] = &p[param.col_size * i];

    model_ = _train(&prob, &param);

#ifdef FIGURE56
#else
    if(save_model(model_file_name, model_))
    {
        fprintf(stderr,"can't save model to file %s\n",model_file_name);
        exit(1);
    }
#endif
    free_and_destroy_model(&model_);
    destroy_param(&param);
    free(prob.y);
    free(prob.x);
    free(prob.query);
    free(x_space);

    // free the variables
    free(v_pp);
    free(p);
#ifdef FIGURE56
    free(probtest.y);
    free(probtest.x);
    free(x_spacetest);
#endif
    free(line);
    return 0;
}
void * addWeaponVariantDef(zoneInfo_t* info, const char* name, char* data, int dataLen)
{
    WeaponVariantDef* ret;
    if (dataLen > 0)
        ret = BG_LoadWeaponDef_LoadObj(data);
    else
        ret = (WeaponVariantDef*)data;

    // now add strings to our list
    // hidetags
    for (int i = 0; i < 32; i++)
    {
        if (ret->hideTags[i] == NULL) break; // no more tags
        ret->hideTags[i] = addScriptString(info, SL_ConvertToString(ret->hideTags[i]));
    }

    // notetrackSounds
    for (int i = 0; i < 16; i++)
    {
        if (ret->WeaponDef->noteTrackSoundMap[0][i] == NULL) break; // no more tags
        ret->WeaponDef->noteTrackSoundMap[0][i] = addScriptString(info, SL_ConvertToString(ret->WeaponDef->noteTrackSoundMap[0][i]));
    }
    for (int i = 0; i < 16; i++)
    {
        if (ret->WeaponDef->noteTrackSoundMap[1][i] == NULL) break; // no more tags
        ret->WeaponDef->noteTrackSoundMap[1][i] = addScriptString(info, SL_ConvertToString(ret->WeaponDef->noteTrackSoundMap[1][i]));
    }

    // notetrackRumble
    for (int i = 0; i < 16; i++)
    {
        if (ret->WeaponDef->noteTrackRumbleMap[0][i] == NULL) break; // no more tags
        ret->WeaponDef->noteTrackRumbleMap[0][i] = addScriptString(info, SL_ConvertToString(ret->WeaponDef->noteTrackRumbleMap[0][i]));
    }
    for (int i = 0; i < 16; i++)
    {
        // check the rumble map here (the original tested noteTrackSoundMap[1][i], which looks like a copy-paste slip)
        if (ret->WeaponDef->noteTrackRumbleMap[1][i] == NULL) break; // no more tags
        ret->WeaponDef->noteTrackRumbleMap[1][i] = addScriptString(info, SL_ConvertToString(ret->WeaponDef->noteTrackRumbleMap[1][i]));
    }

    // now require all sub-assets
    // materials
#define save_material(mat) if (mat) addAsset(info, ASSET_TYPE_MATERIAL, mat->name, addMaterial(info, mat->name, (char*)mat, -1));
    save_material(ret->killIcon);
    save_material(ret->dpadIcon);
    save_material(ret->WeaponDef->reticleCenter);
    save_material(ret->WeaponDef->reticleSide);
    save_material(ret->WeaponDef->hudIcon);
    save_material(ret->WeaponDef->pickupIcon);
    save_material(ret->WeaponDef->ammoCounterIcon);
    save_material(ret->WeaponDef->AdsOverlayShader);
    save_material(ret->WeaponDef->AdsOverlayShaderLowRes);
    save_material(ret->WeaponDef->AdsOverlayShaderEMP);
    save_material(ret->WeaponDef->AdsOverlayShaderEMPLowRes);
#undef save_material

    // xmodel
#define save_model(model) if (model) addAsset(info, ASSET_TYPE_XMODEL, model->name, addXModel(info, model->name, (char*)model, -1));
    for (int i = 0; i < 16; i++) { save_model(ret->WeaponDef->gunXModel[i]); }
    save_model(ret->WeaponDef->handXModel);
    for (int i = 0; i < 16; i++) { save_model(ret->WeaponDef->worldModel[i]); }
    save_model(ret->WeaponDef->worldClipModel);
    save_model(ret->WeaponDef->rocketModel);
    save_model(ret->WeaponDef->knifeModel);
    save_model(ret->WeaponDef->worldKnifeModel);
    save_model(ret->WeaponDef->projectileModel);
#undef save_model

    if (ret->WeaponDef->collisions)
    {
        addAsset(info, ASSET_TYPE_PHYS_COLLMAP, ret->WeaponDef->collisions->name,
                 addPhysCollmap(info, ret->WeaponDef->collisions->name, (char*)ret->WeaponDef->collisions, -1));
    }
    if (ret->WeaponDef->tracer)
    {
        addAsset(info, ASSET_TYPE_TRACER, ret->WeaponDef->tracer->name,
                 addTracer(info, ret->WeaponDef->tracer->name, (char*)ret->WeaponDef->tracer, -1));
    }

    // fx
    // null these for now because I'm not certain effect writing works
#define save_fx(model) if (model) addAsset(info, ASSET_TYPE_FX, model->name, addFxEffectDef(info, model->name, (char*)model, -1));
    /*
    save_fx(ret->WeaponDef->viewFlashEffect);
    save_fx(ret->WeaponDef->worldFlashEffect);
    save_fx(ret->WeaponDef->viewShellEjectEffect);
    save_fx(ret->WeaponDef->worldShellEjectEffect);
    save_fx(ret->WeaponDef->viewLastShotEjectEffect);
    save_fx(ret->WeaponDef->worldLastShotEjectEffect);
    save_fx(ret->WeaponDef->projExplosionEffect);
    save_fx(ret->WeaponDef->projDudEffect);
    save_fx(ret->WeaponDef->projTrailEffect);
    save_fx(ret->WeaponDef->projBeaconEffect);
    save_fx(ret->WeaponDef->projIgnitionEffect);
    save_fx(ret->WeaponDef->turretOverheatEffect);
    */
#undef save_fx

    ret->WeaponDef->viewFlashEffect = NULL;
    ret->WeaponDef->worldFlashEffect = NULL;
    ret->WeaponDef->viewShellEjectEffect = NULL;
    ret->WeaponDef->worldShellEjectEffect = NULL;
    ret->WeaponDef->viewLastShotEjectEffect = NULL;
    ret->WeaponDef->worldLastShotEjectEffect = NULL;
    ret->WeaponDef->projExplosionEffect = NULL;
    ret->WeaponDef->projDudEffect = NULL;
    ret->WeaponDef->projTrailEffect = NULL;
    ret->WeaponDef->projBeaconEffect = NULL;
    ret->WeaponDef->projIgnitionEffect = NULL;
    ret->WeaponDef->turretOverheatEffect = NULL;

    return ret;
}
int main(int argc, char *argv[])
{
    std::string proto_file;
    std::string model_file;
    std::string output_tmfile;
    int res;

    while((res = getopt(argc, argv, "p:m:o:h")) != -1)
    {
        switch(res)
        {
        case 'p':
            proto_file = optarg;
            break;
        case 'm':
            model_file = optarg;
            break;
        case 'o':
            output_tmfile = optarg;
            break;
        case 'h':
            std::cout << "[Usage]: " << argv[0] << " [-h] [-p proto_file] [-m model_file] [-o output_tmfile]\n";
            return 0;
        default:
            break;
        }
    }

    if(proto_file.empty())
    {
        std::cout << "Please specify the -p option to indicate the input proto file.\n";
        return -1;
    }
    if(model_file.empty())
    {
        std::cout << "Please specify the -m option to indicate the input model file.\n";
        return -1;
    }
    if(output_tmfile.empty())
    {
        std::cout << "Please specify the -o option to indicate the output tengine model file.\n";
        return -1;
    }

    // init tengine
    init_tengine_library();
    if(request_tengine_version("0.1") < 0)
        return 1;

    // load caffe model
    std::string model_name = "temp_model";
    if (load_model(model_name.c_str(), "caffe", proto_file.c_str(), model_file.c_str()) < 0)
    {
        std::cout << "Load caffe model failed.\n";
        return -1;
    }

    // create runtime graph
    graph_t graph = create_runtime_graph("graph", model_name.c_str(), NULL);
    if (!check_graph_valid(graph))
    {
        std::cout << "Create graph0 failed.\n";
        return -1;
    }

    // Save the tengine model file
    if(save_model(graph, "tengine", output_tmfile.c_str()) == -1)
    {
        std::cout << "Create tengine model file failed.\n";
        return -1;
    }
    std::cout << "Create tengine model file done: " << output_tmfile << "\n";

    destroy_runtime_graph(graph);
    remove_model(model_name.c_str());
    release_tengine_library();

    return 0;
}
int main(int argc, char **argv)
{
#ifdef GPU
    int dev = findCudaDevice(argc, (const char **) argv);
    if (dev == -1)
        return 0;
    if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS)
    {
        fprintf(stdout, "CUBLAS initialization failed!\n");
        cudaDeviceReset();
        exit(EXIT_FAILURE);
    }
#endif // GPU

    char input_file_name[1024];
    char model_file_name[1024];
    const char *error_msg;

    parse_command_line(argc, argv, input_file_name, model_file_name);

    time_t t1 = clock();
    read_problem(input_file_name);
    time_t t2 = clock();
    printf("reading the input file took %f seconds.\n", float(t2-t1)/CLOCKS_PER_SEC);

    error_msg = check_parameter(&prob, &param);

    if(error_msg)
    {
        fprintf(stderr,"ERROR: %s\n",error_msg);
        exit(1);
    }

    if(flag_cross_validation)
    {
        do_cross_validation();
    }
    else
    {
        model_ = train(&prob, &param);
        if(save_model(model_file_name, model_))
        {
            fprintf(stderr,"can't save model to file %s\n",model_file_name);
            exit(1);
        }
        free_and_destroy_model(&model_);
    }
    destroy_param(&param);
    free(prob.y);
    free(prob.x);
    free(x_space);
    free(line);

#ifdef GPU
    cublasDestroy(handle);
    cudaDeviceReset();
#endif // GPU

    printf("reading the input file took %f seconds.\n", float(t2-t1)/CLOCKS_PER_SEC);
    return 0;
}