CMulticlassLabels* CRelaxedTree::apply_multiclass(CFeatures* data)
{
    if (data != NULL)
    {
        CDenseFeatures<float64_t> *feats = dynamic_cast<CDenseFeatures<float64_t>*>(data);
        REQUIRE(feats != NULL, "Require non-NULL dense features of float64_t\n")
        set_features(feats);
    }

    // init kernels for all sub-machines
    for (int32_t i=0; i<m_machines->get_num_elements(); i++)
    {
        CSVM *machine = (CSVM*)m_machines->get_element(i);
        CKernel *kernel = machine->get_kernel();
        CFeatures* lhs = kernel->get_lhs();
        kernel->init(lhs, m_feats);
        SG_UNREF(machine);
        SG_UNREF(kernel);
        SG_UNREF(lhs);
    }

    CMulticlassLabels *lab = new CMulticlassLabels(m_feats->get_num_vectors());
    SG_REF(lab);
    for (int32_t i=0; i < lab->get_num_labels(); ++i)
        lab->set_int_label(i, int32_t(apply_one(i)));

    return lab;
}
bool CCombinedKernel::delete_optimization()
{
    CListElement* current = NULL;
    CKernel* k = get_first_kernel(current);

    while (k)
    {
        if (k->has_property(KP_LINADD))
            k->delete_optimization();

        SG_UNREF(k);
        k = get_next_kernel(current);
    }

    SG_FREE(sv_idx);
    sv_idx = NULL;

    SG_FREE(sv_weight);
    sv_weight = NULL;

    sv_count = 0;
    set_is_initialized(false);

    return true;
}
void CCombinedKernel::compute_batch(
    int32_t num_vec, int32_t* vec_idx, float64_t* result, int32_t num_suppvec,
    int32_t* IDX, float64_t* weights, float64_t factor)
{
    ASSERT(num_vec<=get_num_vec_rhs())
    ASSERT(num_vec>0);
    ASSERT(vec_idx);
    ASSERT(result);

    //we have to do the optimization business ourselves but lets
    //make sure we start cleanly
    delete_optimization();

    CListElement* current = NULL;
    CKernel* k = get_first_kernel(current);

    while (k)
    {
        if (k && k->has_property(KP_BATCHEVALUATION))
        {
            if (k->get_combined_kernel_weight()!=0)
                k->compute_batch(num_vec, vec_idx, result, num_suppvec, IDX,
                        weights, k->get_combined_kernel_weight());
        }
        else
            emulate_compute_batch(k, num_vec, vec_idx, result, num_suppvec, IDX, weights);

        SG_UNREF(k);
        k = get_next_kernel(current);
    }

    //clean up
    delete_optimization();
}
void CCombinedKernel::set_subkernel_weights(SGVector<float64_t> weights)
{
    if (append_subkernel_weights)
    {
        int32_t i=0;
        CListElement* current = NULL;
        CKernel* k = get_first_kernel(current);

        while (k)
        {
            int32_t num = k->get_num_subkernels();
            ASSERT(i<weights.vlen);
            k->set_subkernel_weights(SGVector<float64_t>(&weights.vector[i], num, false));

            SG_UNREF(k);
            k = get_next_kernel(current);
            i += num;
        }
    }
    else
    {
        int32_t i=0;
        CListElement* current = NULL;
        CKernel* k = get_first_kernel(current);

        while (k)
        {
            ASSERT(i<weights.vlen);
            k->set_combined_kernel_weight(weights.vector[i]);

            SG_UNREF(k);
            k = get_next_kernel(current);
            i++;
        }
    }
}
int main (void)
{
    // TODO: implement all destructors used in CKernel, otherwise cannot return from main()
    CKernel Kernel;
    if (!Kernel.Initialize ())
    {
        DisableInterrupts ();
        for (;;);

        return EXIT_HALT;
    }

    TShutdownMode ShutdownMode = Kernel.Run ();

    switch (ShutdownMode)
    {
    case ShutdownReboot:
        reboot ();
        return EXIT_REBOOT;

    case ShutdownHalt:
    default:
        DisableInterrupts ();
        for (;;);

        return EXIT_HALT;
    }
}
void CCombinedKernel::add_to_normal(int32_t idx, float64_t weight)
{
    CListElement* current = NULL;
    CKernel* k = get_first_kernel(current);

    while (k)
    {
        k->add_to_normal(idx, weight);
        SG_UNREF(k);
        k = get_next_kernel(current);
    }

    set_is_initialized(true);
}
void CCombinedKernel::clear_normal()
{
    CListElement* current = NULL;
    CKernel* k = get_first_kernel(current);

    while (k)
    {
        k->clear_normal();
        SG_UNREF(k);
        k = get_next_kernel(current);
    }

    set_is_initialized(true);
}
void CCombinedKernel::set_optimization_type(EOptimizationType t)
{
    CKernel* k = get_first_kernel();

    while (k)
    {
        k->set_optimization_type(t);
        SG_UNREF(k);
        k = get_next_kernel();
    }

    CKernel::set_optimization_type(t);
}
void CKernel::Start(void)
{
    if (m_dwMagic != CKERNEL_MAGIC)
    {
        kprintf("[e] Error! My constructor wasn't called properly.\n");
        return;
    }

    kprintf("wat\n");

    m_Logger = new CLogger();
    Alentours::CPCIManager::Initialize();

    CKernelML4 *ML4 = new CKernelML4();
    CTask *KernelTask = new CTask(*ML4);
    CScheduler::AddTask(KernelTask);
    kprintf("[i] Kernel task has PID %i.\n", KernelTask->GetPID());

    ML4->Apply();

    KernelTask->UseStack([](CTask *Task) {
        kprintf("[i] Switched to Tier1 stack\n");

        // After enabling, only CScheduler::* calls are allowed for API
        CScheduler::Enable();
        kprintf("[i] Enabled scheduler.\n");

        g_Kernel.SpawnThreads();
        CScheduler::Exit();
    });
}
const float64_t* CCombinedKernel::get_subkernel_weights(int32_t& num_weights)
{
    SG_DEBUG("entering CCombinedKernel::get_subkernel_weights()\n");

    num_weights = get_num_subkernels();
    SG_FREE(subkernel_weights_buffer);
    subkernel_weights_buffer = SG_MALLOC(float64_t, num_weights);

    if (append_subkernel_weights)
    {
        SG_DEBUG("appending kernel weights\n");

        int32_t i=0;
        CListElement* current = NULL;
        CKernel* k = get_first_kernel(current);

        while (k)
        {
            int32_t num = -1;
            const float64_t *w = k->get_subkernel_weights(num);
            ASSERT(num==k->get_num_subkernels());

            for (int32_t j=0; j<num; j++)
                subkernel_weights_buffer[i+j]=w[j];

            SG_UNREF(k);
            k = get_next_kernel(current);
            i += num;
        }
    }
    else
    {
        SG_DEBUG("not appending kernel weights\n");

        int32_t i=0;
        CListElement* current = NULL;
        CKernel* k = get_first_kernel(current);

        while (k)
        {
            subkernel_weights_buffer[i] = k->get_combined_kernel_weight();

            SG_UNREF(k);
            k = get_next_kernel(current);
            i++;
        }
    }

    SG_DEBUG("leaving CCombinedKernel::get_subkernel_weights()\n");
    return subkernel_weights_buffer;
}
void CCombinedKernel::list_kernels()
{
    SG_INFO("BEGIN COMBINED KERNEL LIST - ");
    this->list_kernel();

    CListElement* current = NULL;
    CKernel* k = get_first_kernel(current);

    while (k)
    {
        k->list_kernel();
        SG_UNREF(k);
        k = get_next_kernel(current);
    }

    SG_INFO("END COMBINED KERNEL LIST - ");
}
// assumes that all constraints are satisfied
float64_t CMKL::compute_mkl_dual_objective()
{
    int32_t n=get_num_support_vectors();
    float64_t mkl_obj=0;

    if (labels && kernel && kernel->get_kernel_type() == K_COMBINED)
    {
        CKernel* kn = ((CCombinedKernel*)kernel)->get_first_kernel();
        while (kn)
        {
            float64_t sum=0;
            for (int32_t i=0; i<n; i++)
            {
                int32_t ii=get_support_vector(i);

                for (int32_t j=0; j<n; j++)
                {
                    int32_t jj=get_support_vector(j);
                    sum+=get_alpha(i)*get_alpha(j)*kn->kernel(ii,jj);
                }
            }

            if (mkl_norm==1.0)
                mkl_obj = CMath::max(mkl_obj, sum);
            else
                mkl_obj += CMath::pow(sum, mkl_norm/(mkl_norm-1));

            SG_UNREF(kn);
            kn = ((CCombinedKernel*) kernel)->get_next_kernel();
        }

        if (mkl_norm==1.0)
            mkl_obj=-0.5*mkl_obj;
        else
            mkl_obj= -0.5*CMath::pow(mkl_obj, (mkl_norm-1)/mkl_norm);

        mkl_obj+=compute_sum_alpha();
    }
    else
        SG_ERROR("cannot compute objective, labels or kernel not set\n");

    return -mkl_obj;
}
int main(int argc, char* argv[])
{
    std::cout << "test-suite-metrics (SoDA tool)" << std::endl;

    options_description desc("Options");
    desc.add_options()
        ("help,h", "Prints help message")
        ("create-json-file,j", "Creates a sample json file")
        ("list-cluster-algorithms,c", "Lists the cluster algorithms")
        ("list-metric-plugins,m", "Lists the metric plugins");

    variables_map vm;
    store(parse_command_line(argc, argv, desc), vm);
    notify(vm);

    if (argc < 2) {
        std::cerr << "[ERROR] There are no arguments!" << std::endl;
        printHelp();
        return 1;
    }

    if (vm.count("help")) {
        printHelp();
        std::cout << desc << std::endl;
        return 0;
    }

    if (vm.count("list-cluster-algorithms")) {
        printPluginNames("cluster", kernel.getTestSuiteClusterPluginManager().getPluginNames());
        return 0;
    }

    if (vm.count("list-metric-plugins")) {
        printPluginNames("metric", kernel.getTestSuiteMetricPluginManager().getPluginNames());
        return 0;
    }

    if (vm.count("create-json-file")) {
        createJsonFile();
        return 0;
    }

    return loadJsonFiles(String(argv[1]));
}
int main(int argc, char *argv[])
{
    CLog log;
    log.Init(Debug, "TestApp");

    LogInfo("Starting app (%s)", g_strBuildStamp.c_str());
    CProfiler profile("main", 3, Info);

    CKernel *pKernel = CKernel::Create();

    pKernel->AddTask(CTimerTask::Create(10));
    pKernel->AddTask(CInputTask::Create(20));
    pKernel->AddTask(CInterpolatorTask::Create(30));
    pKernel->AddTask(CTriggerTask::Create(40));
    pKernel->AddTask(CCameraTask::Create(50));
    pKernel->AddTask(CConsoleTask::Create(9000));
    pKernel->AddTask(CVideoTask::Create(10000));
    pKernel->AddTask(CAppTask::Create(100));

    pKernel->Execute();
    pKernel->Destroy();

    LogInfo("Closing app");
    return 0;
}
int main (void)
{
    // cannot return here because some destructors used in CKernel are not implemented
    CKernel Kernel;
    if (!Kernel.Initialize ())
    {
        halt ();
        return EXIT_HALT;
    }

    TShutdownMode ShutdownMode = Kernel.Run ();

    switch (ShutdownMode)
    {
    case ShutdownReboot:
        reboot ();
        return EXIT_REBOOT;

    case ShutdownHalt:
    default:
        halt ();
        return EXIT_HALT;
    }
}
void calculateMetric(CSelectionData *selectionData, const std::string &name, rapidjson::Document &results)
{
    ITestSuiteMetricPlugin *metric = kernel.getTestSuiteMetricPluginManager().getPlugin(name);

    StringVector dependencies = metric->getDependency();
    for (StringVector::iterator it = dependencies.begin(); it != dependencies.end(); it++) {
        if (metricsCalculated.find(*it) == metricsCalculated.end()) {
            calculateMetric(selectionData, *it, results);
        }
    }

    (std::cerr << "[INFO] Calculating metrics: " << metric->getName() << " ...").flush();
    metric->init(selectionData, &clusterList, revision);
    metric->calculate(results);
    metricsCalculated.insert(name);
    (std::cerr << " done." << std::endl).flush();
}
void CCombinedKernel::compute_by_subkernel(int32_t idx, float64_t* subkernel_contrib)
{
    if (append_subkernel_weights)
    {
        int32_t i=0;
        CListElement* current = NULL;
        CKernel* k = get_first_kernel(current);

        while (k)
        {
            int32_t num = -1;
            k->get_subkernel_weights(num);

            if (num>1)
                k->compute_by_subkernel(idx, &subkernel_contrib[i]);
            else
                subkernel_contrib[i] += k->get_combined_kernel_weight()*k->compute_optimized(idx);

            SG_UNREF(k);
            k = get_next_kernel(current);
            i += num;
        }
    }
    else
    {
        int32_t i=0;
        CListElement* current = NULL;
        CKernel* k = get_first_kernel(current);

        while (k)
        {
            if (k->get_combined_kernel_weight()!=0)
                subkernel_contrib[i] += k->get_combined_kernel_weight()*k->compute_optimized(idx);

            SG_UNREF(k);
            k = get_next_kernel(current);
            i++;
        }
    }
}
void CGaussianProcessRegression::update_kernel_matrices()
{
    CKernel* kernel = NULL;

    if (m_method)
        kernel = m_method->get_kernel();

    if (kernel)
    {
        float64_t m_scale = m_method->get_scale();

        kernel->cleanup();

        if (m_method->get_latent_features())
            kernel->init(m_method->get_latent_features(), m_data);
        else
            kernel->init(m_data, m_data);

        //K(X_test, X_train)
        m_k_trts = kernel->get_kernel_matrix();

        for (index_t i = 0; i < m_k_trts.num_rows; i++)
        {
            for (index_t j = 0; j < m_k_trts.num_cols; j++)
                m_k_trts(i,j) *= (m_scale*m_scale);
        }

        kernel->cleanup();
        kernel->init(m_data, m_data);

        m_k_tsts = kernel->get_kernel_matrix();

        for (index_t i = 0; i < m_k_tsts.num_rows; i++)
        {
            for (index_t j = 0; j < m_k_tsts.num_cols; j++)
                m_k_tsts(i,j) *= (m_scale*m_scale);
        }

        SG_UNREF(kernel);
    }
}
int main(int argc, char* argv[])
{
    cout << "test-suite-reduction (SoDA tool)" << endl;

    options_description desc("Options");
    desc.add_options()
        ("help,h", "Prints help message")
        ("create-json-file,j", "Creates a sample json file")
        ("list-algorithms,l", "Lists the reduction algorithms");

    variables_map vm;
    store(parse_command_line(argc, argv, desc), vm);
    notify(vm);

    if (argc < 2) {
        cerr << "[ERROR] There are no arguments!" << endl;
        printHelp();
        return 1;
    }

    if (vm.count("help")) {
        printHelp();
        cout << desc << endl;
        return 0;
    }

    if (vm.count("list-algorithms")) {
        printPluginNames(kernel.getTestSuiteReductionPluginManager().getPluginNames());
        return 0;
    }

    if (vm.count("create-json-file")) {
        createJsonFile();
        return 0;
    }

    return loadJsonFiles(String(argv[1]));
}
SGMatrix<float64_t> CCombinedKernel::get_parameter_gradient(TParameter* param,
        CSGObject* obj, index_t index)
{
    SGMatrix<float64_t> result(0,0);

    if (strcmp(param->m_name, "combined_kernel_weight") == 0)
    {
        CListElement* current = NULL;
        CKernel* k = get_first_kernel(current);

        if (append_subkernel_weights)
        {
            while (k)
            {
                result = k->get_parameter_gradient(param, obj, index);
                SG_UNREF(k);

                if (result.num_cols*result.num_rows > 0)
                    return result;

                k = get_next_kernel(current);
            }
        }
        else
        {
            while (k)
            {
                if (obj == k)
                {
                    result = k->get_kernel_matrix();
                    SG_UNREF(k);
                    return result;
                }

                SG_UNREF(k);
                k = get_next_kernel(current);
            }
        }
    }
    else
    {
        CListElement* current = NULL;
        CKernel* k = get_first_kernel(current);
        float64_t coeff;

        while (k)
        {
            SGMatrix<float64_t> derivative = k->get_parameter_gradient(param, obj, index);

            coeff = 1.0;
            if (!append_subkernel_weights)
                coeff = k->get_combined_kernel_weight();

            for (index_t g = 0; g < derivative.num_rows; g++)
            {
                for (index_t h = 0; h < derivative.num_cols; h++)
                    derivative(g,h) *= coeff;
            }

            if (derivative.num_cols*derivative.num_rows > 0)
            {
                if (result.num_cols == 0 && result.num_rows == 0)
                    result = derivative;
                else
                {
                    for (index_t g = 0; g < derivative.num_rows; g++)
                    {
                        for (index_t h = 0; h < derivative.num_cols; h++)
                            result(g,h) += derivative(g,h);
                    }
                }
            }

            SG_UNREF(k);
            k = get_next_kernel(current);
        }
    }

    return result;
}
void processJsonFiles(String path)
{
    try {
        std::cout << "[INFO] Processing " << path << " configuration file." << endl;

        boost::filesystem::path jsonPath(path);
        CSelectionData selectionData;
        CJsonReader reader = CJsonReader(path);

        String programName = reader.getStringFromProperty("program-name");
        if (programName.empty()) {
            std::cerr << "[ERROR] Program name is missing in configuration file:" << path << std::endl;
            return;
        }

        StringVector reductionList = reader.getStringVectorFromProperty("reduction-method");
        if (reductionList.empty()) {
            std::cerr << "[ERROR] reduction-algorithm is missing from the configuration file(" << path << ")." << std::endl;
            printPluginNames(kernel.getTestSuiteReductionPluginManager().getPluginNames());
            return;
        } else {
            int iteration = reader.getIntFromProperty("iteration");
            for (StringVector::const_iterator it = reductionList.begin(); it != reductionList.end(); ++it) {
                if (*it == "duplation" && !iteration) {
                    std::cerr << "[ERROR] Missing iteration parameter for duplation reduction method in configuration file: " << path << "." << std::endl;
                    return;
                }
                if (*it == "random" && !iteration && reader.getIntVectorFromProperty("reduction-sizes").empty()) {
                    std::cerr << "[ERROR] Missing iteration or reduction-sizes parameter for random reduction method in configuration file: " << path << "." << std::endl;
                    return;
                }
                try {
                    kernel.getTestSuiteReductionPluginManager().getPlugin(*it);
                } catch (std::out_of_range &e) {
                    std::cerr << "[ERROR] Invalid reduction algorithm name(" << *it << ") in configuration file: " << path << "." << std::endl;
                    return;
                }
            }
        }

        String covPath = reader.getStringFromProperty("coverage-data");
        if (covPath[0] == '.') {
            covPath = jsonPath.parent_path().string() + "/" + covPath;
        }

        String resPath = reader.getStringFromProperty("results-data");
        if (resPath[0] == '.') {
            resPath = jsonPath.parent_path().string() + "/" + resPath;
        }

        if (exists(covPath) && exists(resPath)) {
            (std::cerr << "[INFO] loading coverage from " << covPath << " ...").flush();
            selectionData.loadCoverage(covPath);
            (std::cerr << " done\n[INFO] loading results from " << resPath << " ...").flush();
            selectionData.loadResults(resPath);
            (std::cerr << " done" << std::endl).flush();
        } else {
            std::cerr << "[ERROR] Missing or invalid input files in config file " << path << "." << std::endl;
            return;
        }

        if (reader.getBoolFromProperty("globalize")) {
            // Globalize data.
            (std::cerr << "[INFO] Globalizing ... ").flush();
            selectionData.globalize();
            selectionData.filterToCoverage();
            (std::cerr << " done" << std::endl).flush();
        }

        String dirPath = reader.getStringFromProperty("output-dir");
        if (dirPath[0] == '.') {
            dirPath = jsonPath.parent_path().string() + "/" + dirPath;
            reader.setProperty("output-dir", dirPath);
        }
        if (!(boost::filesystem::exists(dirPath))) {
            boost::filesystem::create_directory(dirPath);
        }

        boost::filesystem::path p = boost::filesystem::path(covPath).filename();

        while (!reductionList.empty()) {
            string reductionMethod = reductionList.back();
            reductionList.pop_back();
            ITestSuiteReductionPlugin *plugin = NULL;

            std::ofstream outStream((dirPath + "/" + reductionMethod + "-" + p.string() + ".reduced").c_str());
            if (!outStream.good()) {
                throw CException("Reduction output file error.", reader.getStringFromProperty("coverage-data") + ".reduced");
            }

            try {
                plugin = kernel.getTestSuiteReductionPluginManager().getPlugin(reductionMethod);
                plugin->init(&selectionData, reader);
            } catch (std::out_of_range &e) {
                std::cerr << "[ERROR] Unknown reduction mode." << std::endl;
                printPluginNames(kernel.getTestSuiteReductionPluginManager().getPluginNames());
                return;
            }
            plugin->reduction(outStream);
            outStream.close();
        }
    } catch (std::exception &e) {
        std::cerr << e.what() << std::endl;
        return;
    } catch (...) {
        std::cerr << "Exception of unknown type while processing configuration file(" << path << ") arguments." << std::endl;
        return;
    }
    return;
}
void CGUIFeatures::invalidate_test()
{
    CKernel *k = ui->ui_kernel->get_kernel();
    if (k)
        k->remove_rhs();
}
extern "C" void registerPlugin(CKernel &kernel) { kernel.getTestSuitePrioritizationPluginManager().addPlugin(new GeneralIgnorePrioritizationPlugin()); }
SGVector<int32_t> CRelaxedTree::train_node_with_initialization(const CRelaxedTree::entry_t &mu_entry, SGVector<int32_t> classes, CSVM *svm)
{
    SGVector<int32_t> mu(classes.vlen), prev_mu(classes.vlen);
    mu.zero();
    mu[mu_entry.first.first] = 1;
    mu[mu_entry.first.second] = -1;

    SGVector<int32_t> long_mu(m_num_classes);
    svm->set_C(m_svm_C, m_svm_C);
    svm->set_epsilon(m_svm_epsilon);

    for (int32_t iiter=0; iiter < m_max_num_iter; ++iiter)
    {
        long_mu.zero();
        for (int32_t i=0; i < classes.vlen; ++i)
        {
            if (mu[i] == 1)
                long_mu[classes[i]] = 1;
            else if (mu[i] == -1)
                long_mu[classes[i]] = -1;
        }

        SGVector<int32_t> subset(m_feats->get_num_vectors());
        SGVector<float64_t> binlab(m_feats->get_num_vectors());
        int32_t k=0;

        CMulticlassLabels *labs = dynamic_cast<CMulticlassLabels *>(m_labels);
        for (int32_t i=0; i < binlab.vlen; ++i)
        {
            int32_t lab = labs->get_int_label(i);
            binlab[i] = long_mu[lab];
            if (long_mu[lab] != 0)
                subset[k++] = i;
        }

        subset.vlen = k;

        CBinaryLabels *binary_labels = new CBinaryLabels(binlab);
        SG_REF(binary_labels);
        binary_labels->add_subset(subset);
        m_feats->add_subset(subset);

        CKernel *kernel = (CKernel *)m_kernel->shallow_copy();
        kernel->init(m_feats, m_feats);
        svm->set_kernel(kernel);
        svm->set_labels(binary_labels);
        svm->train();

        binary_labels->remove_subset();
        m_feats->remove_subset();
        SG_UNREF(binary_labels);

        std::copy(&mu[0], &mu[mu.vlen], &prev_mu[0]);
        mu = color_label_space(svm, classes);

        bool bbreak = true;
        for (int32_t i=0; i < mu.vlen; ++i)
        {
            if (mu[i] != prev_mu[i])
            {
                bbreak = false;
                break;
            }
        }

        if (bbreak)
            break;
    }

    return mu;
}
bool CSVMLightOneClass::train_machine(CFeatures* data)
{
    //certain setup params
    mkl_converged=false;
    verbosity=1;
    init_margin=0.15;
    init_iter=500;
    precision_violations=0;
    opt_precision=DEF_PRECISION;

    strcpy(learn_parm->predfile, "");
    learn_parm->biased_hyperplane=0;
    learn_parm->sharedslack=0;
    learn_parm->remove_inconsistent=0;
    learn_parm->skip_final_opt_check=0;
    learn_parm->svm_maxqpsize=get_qpsize();
    learn_parm->svm_newvarsinqp=learn_parm->svm_maxqpsize-1;
    learn_parm->maxiter=100000;
    learn_parm->svm_iter_to_shrink=100;
    learn_parm->svm_c=C1;
    learn_parm->transduction_posratio=0.33;
    learn_parm->svm_costratio=C2/C1;
    learn_parm->svm_costratio_unlab=1.0;
    learn_parm->svm_unlabbound=1E-5;
    learn_parm->epsilon_crit=epsilon; // GU: better decrease it ... ??
    learn_parm->epsilon_a=1E-15;
    learn_parm->compute_loo=0;
    learn_parm->rho=1.0;
    learn_parm->xa_depth=0;

    if (!kernel)
        SG_ERROR("SVM_light can not proceed without kernel!\n");

    if (data)
        kernel->init(data, data);

    if (!kernel->has_features())
        SG_ERROR("SVM_light can not proceed without initialized kernel!\n");

    int32_t num_vec=kernel->get_num_vec_lhs();
    SG_INFO("num_vec=%d\n", num_vec);

    SG_UNREF(labels);
    labels=new CLabels(num_vec);
    labels->set_to_one();

    // in case of LINADD enabled kernels cleanup!
    if (kernel->has_property(KP_LINADD) && get_linadd_enabled())
        kernel->clear_normal();

    // output some info
    SG_DEBUG("threads = %i\n", parallel->get_num_threads());
    SG_DEBUG("qpsize = %i\n", learn_parm->svm_maxqpsize);
    SG_DEBUG("epsilon = %1.1e\n", learn_parm->epsilon_crit);
    SG_DEBUG("kernel->has_property(KP_LINADD) = %i\n", kernel->has_property(KP_LINADD));
    SG_DEBUG("kernel->has_property(KP_KERNCOMBINATION) = %i\n", kernel->has_property(KP_KERNCOMBINATION));
    SG_DEBUG("kernel->has_property(KP_BATCHEVALUATION) = %i\n", kernel->has_property(KP_BATCHEVALUATION));
    SG_DEBUG("kernel->get_optimization_type() = %s\n", kernel->get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" : "SLOWBUTMEMEFFICIENT");
    SG_DEBUG("get_solver_type() = %i\n", get_solver_type());
    SG_DEBUG("get_linadd_enabled() = %i\n", get_linadd_enabled());
    SG_DEBUG("get_batch_computation_enabled() = %i\n", get_batch_computation_enabled());
    SG_DEBUG("kernel->get_num_subkernels() = %i\n", kernel->get_num_subkernels());

    use_kernel_cache = !((kernel->get_kernel_type() == K_CUSTOM) ||
            (get_linadd_enabled() && kernel->has_property(KP_LINADD)));
    SG_DEBUG("use_kernel_cache = %i\n", use_kernel_cache);

    if (kernel->get_kernel_type() == K_COMBINED)
    {
        CKernel* kn = ((CCombinedKernel*)kernel)->get_first_kernel();

        while (kn)
        {
            // allocate kernel cache but clean up beforehand
            kn->resize_kernel_cache(kn->get_cache_size());
            SG_UNREF(kn);
            kn = ((CCombinedKernel*) kernel)->get_next_kernel();
        }
    }

    kernel->resize_kernel_cache(kernel->get_cache_size());

    // train the svm
    svm_learn();

    // brain damaged svm light work around
    create_new_model(model->sv_num-1);
    set_bias(-model->b);
    for (int32_t i=0; i<model->sv_num-1; i++)
    {
        set_alpha(i, model->alpha[i+1]);
        set_support_vector(i, model->supvec[i+1]);
    }

    // in case of LINADD enabled kernels cleanup!
    if (kernel->has_property(KP_LINADD) && get_linadd_enabled())
    {
        kernel->clear_normal();
        kernel->delete_optimization();
    }

    if (use_kernel_cache)
        kernel->kernel_cache_cleanup();

    return true;
}
void processJsonFiles(String path)
{
    try {
        std::cout << "[INFO] Processing " << path << " configuration file." << std::endl;

        rapidjson::Document reader;
        {
            FILE *in = fopen(path.c_str(), "r");
            char readBuffer[65536];
            rapidjson::FileReadStream is(in, readBuffer, sizeof(readBuffer));
            reader.ParseStream<0, rapidjson::UTF8<>, rapidjson::FileReadStream>(is);
            fclose(in);
        }

        boost::filesystem::path jsonPath(path);

        std::string clusterAlgorithmName = reader["cluster-algorithm"].GetString();
        ITestSuiteClusterPlugin *clusterAlgorithm = kernel.getTestSuiteClusterPluginManager().getPlugin(clusterAlgorithmName);
        clusterAlgorithm->init(reader);

        CSelectionData *selectionData = new CSelectionData();

        StringVector metrics;
        for (rapidjson::Value::ConstValueIterator itr = reader["metrics"].Begin(); itr != reader["metrics"].End(); ++itr)
            metrics.push_back(itr->GetString());

        if (metrics.empty()) {
            std::cerr << "[ERROR] Missing metrics parameter in config file " << path << "." << std::endl;
            return;
        } else {
            for (StringVector::const_iterator it = metrics.begin(); it != metrics.end(); ++it) {
                try {
                    kernel.getTestSuiteMetricPluginManager().getPlugin(*it);
                } catch (std::out_of_range &e) {
                    std::cerr << "[ERROR] Invalid metric name(" << *it << ") in configuration file: " << path << "." << std::endl;
                    return;
                }
            }
        }

        revision = reader["revision"].GetInt();
        outputDir = reader["output-dir"].GetString();
        if (outputDir.empty()) {
            std::cerr << "[ERROR] Missing output-dir parameter in config file " << path << "." << std::endl;
            return;
        }
        if (outputDir[0] == '.')
            outputDir = jsonPath.parent_path().string() + "/" + outputDir;
        if (!exists(outputDir))
            boost::filesystem::create_directory(boost::filesystem::path(outputDir));

        String covPath = reader["coverage-data"].GetString();
        if (covPath[0] == '.') {
            covPath = jsonPath.parent_path().string() + "/" + covPath;
        }

        String resPath = reader["results-data"].GetString();
        if (resPath[0] == '.') {
            resPath = jsonPath.parent_path().string() + "/" + resPath;
        }

        if (exists(covPath) && exists(resPath)) {
            (std::cerr << "[INFO] loading coverage from " << covPath << " ...").flush();
            selectionData->loadCoverage(covPath);
            (std::cerr << " done\n[INFO] loading results from " << resPath << " ...").flush();
            selectionData->loadResults(resPath);
            (std::cerr << " done" << std::endl).flush();
        } else {
            std::cerr << "[ERROR] Missing or invalid input files in config file " << path << "." << std::endl;
            return;
        }

        if (reader["globalize"].GetBool()) {
            (std::cerr << "[INFO] Globalizing ...").flush();
            selectionData->globalize();
            (std::cerr << " done" << std::endl).flush();
        }

        clusterList.clear();
        metricsCalculated.clear();

        (std::cerr << "[INFO] Running cluster algorithm: " << clusterAlgorithm->getName() << " ...").flush();
        clusterAlgorithm->execute(*selectionData, clusterList);
        (std::cerr << " done" << std::endl).flush();

        rapidjson::Document results;
        results.SetObject();
        for (StringVector::iterator it = metrics.begin(); it != metrics.end(); it++) {
            if (metricsCalculated.find(*it) == metricsCalculated.end()) {
                calculateMetric(selectionData, *it, results);
            }
        }

        saveResults(results);

        delete selectionData;
    } catch (std::exception &e) {
        std::cerr << e.what() << std::endl;
        return;
    } catch (...) {
        std::cerr << "Exception of unknown type while processing configuration file(" << path << ") arguments." << std::endl;
        return;
    }
    return;
}
extern "C" void registerPlugin(CKernel &kernel) { kernel.getFaultLocalizationTechniquePluginManager().addPlugin(new TarantulaFaultLocalizationTechniquePlugin()); }
void CSVRLight::update_linear_component_mkl(
    int32_t* docs, int32_t* label, int32_t* active2dnum, float64_t* a,
    float64_t* a_old, int32_t* working2dnum, int32_t totdoc, float64_t* lin,
    float64_t* aicache, float64_t* c)
{
    int32_t num = totdoc;
    int32_t num_weights = -1;
    int32_t num_kernels = kernel->get_num_subkernels();
    const float64_t* old_beta = kernel->get_subkernel_weights(num_weights);

    ASSERT(num_weights==num_kernels);

    if ((kernel->get_kernel_type()==K_COMBINED) &&
            (!((CCombinedKernel*)kernel)->get_append_subkernel_weights()))// for combined kernel
    {
        CCombinedKernel* k = (CCombinedKernel*) kernel;
        CKernel* kn = k->get_first_kernel();
        int32_t n = 0, i, j;

        while (kn!=NULL)
        {
            for (i=0; i<num; i++)
            {
                if (a[i] != a_old[i])
                {
                    kn->get_kernel_row(i, NULL, aicache, true);
                    for (j=0; j<num; j++)
                        W[j*num_kernels+n]+=(a[i]-a_old[i])*aicache[regression_fix_index(j)]*(float64_t)label[i];
                }
            }

            SG_UNREF(kn);
            kn = k->get_next_kernel();
            n++;
        }
    }
    else // hope the kernel is fast ...
    {
        float64_t* w_backup = SG_MALLOC(float64_t, num_kernels);
        float64_t* w1 = SG_MALLOC(float64_t, num_kernels);

        // backup and set to zero
        for (int32_t i=0; i<num_kernels; i++)
        {
            w_backup[i] = old_beta[i];
            w1[i]=0.0;
        }

        for (int32_t n=0; n<num_kernels; n++)
        {
            w1[n]=1.0;
            kernel->set_subkernel_weights(SGVector<float64_t>(w1, num_weights));

            for (int32_t i=0; i<num; i++)
            {
                if (a[i] != a_old[i])
                {
                    for (int32_t j=0; j<num; j++)
                        W[j*num_kernels+n]+=(a[i]-a_old[i])*compute_kernel(i,j)*(float64_t)label[i];
                }
            }
            w1[n]=0.0;
        }

        // restore old weights
        kernel->set_subkernel_weights(SGVector<float64_t>(w_backup, num_weights));

        SG_FREE(w_backup);
        SG_FREE(w1);
    }

    call_mkl_callback(a, label, lin, c, totdoc);
}
void CSVRLight::svr_learn()
{
    int32_t *inconsistent, i, j;
    int32_t upsupvecnum;
    float64_t maxdiff, *lin, *c, *a;
    int32_t iterations;
    float64_t *xi_fullset; /* buffer for storing xi on full sample in loo */
    float64_t *a_fullset;  /* buffer for storing alpha on full sample in loo */
    TIMING timing_profile;
    SHRINK_STATE shrink_state;
    int32_t* label;
    int32_t* docs;

    ASSERT(m_labels);
    int32_t totdoc=m_labels->get_num_labels();
    num_vectors=totdoc;

    // set up regression problem in standard form
    docs=SG_MALLOC(int32_t, 2*totdoc);
    label=SG_MALLOC(int32_t, 2*totdoc);
    c=SG_MALLOC(float64_t, 2*totdoc);

    for(i=0;i<totdoc;i++)
    {
        docs[i]=i;
        j=2*totdoc-1-i;
        label[i]=+1;
        c[i]=m_labels->get_label(i);
        docs[j]=j;
        label[j]=-1;
        c[j]=m_labels->get_label(i);
    }
    totdoc*=2;

    //prepare kernel cache for regression (i.e. cachelines are twice of current size)
    kernel->resize_kernel_cache(kernel->get_cache_size(), true);

    if (kernel->get_kernel_type() == K_COMBINED)
    {
        CCombinedKernel* k = (CCombinedKernel*) kernel;
        CKernel* kn = k->get_first_kernel();

        while (kn)
        {
            kn->resize_kernel_cache(kernel->get_cache_size(), true);
            SG_UNREF(kn);
            kn = k->get_next_kernel();
        }
    }

    timing_profile.time_kernel=0;
    timing_profile.time_opti=0;
    timing_profile.time_shrink=0;
    timing_profile.time_update=0;
    timing_profile.time_model=0;
    timing_profile.time_check=0;
    timing_profile.time_select=0;

    SG_FREE(W);
    W=NULL;

    if (kernel->has_property(KP_KERNCOMBINATION) && callback)
    {
        W = SG_MALLOC(float64_t, totdoc*kernel->get_num_subkernels());
        for (i=0; i<totdoc*kernel->get_num_subkernels(); i++)
            W[i]=0;
    }

    /* make sure -n value is reasonable */
    if((learn_parm->svm_newvarsinqp < 2) ||
            (learn_parm->svm_newvarsinqp > learn_parm->svm_maxqpsize))
    {
        learn_parm->svm_newvarsinqp=learn_parm->svm_maxqpsize;
    }

    init_shrink_state(&shrink_state,totdoc,(int32_t)MAXSHRINK);

    inconsistent = SG_MALLOC(int32_t, totdoc);
    a = SG_MALLOC(float64_t, totdoc);
    a_fullset = SG_MALLOC(float64_t, totdoc);
    xi_fullset = SG_MALLOC(float64_t, totdoc);
    lin = SG_MALLOC(float64_t, totdoc);
    learn_parm->svm_cost = SG_MALLOC(float64_t, totdoc);

    if (m_linear_term.vlen>0)
        learn_parm->eps=get_linear_term_array();
    else
    {
        learn_parm->eps=SG_MALLOC(float64_t, totdoc);
        /* equivalent regression epsilon for classification */
        CMath::fill_vector(learn_parm->eps, totdoc, tube_epsilon);
    }

    SG_FREE(model->supvec);
    SG_FREE(model->alpha);
    SG_FREE(model->index);
    model->supvec = SG_MALLOC(int32_t, totdoc+2);
    model->alpha = SG_MALLOC(float64_t, totdoc+2);
    model->index = SG_MALLOC(int32_t, totdoc+2);

    model->at_upper_bound=0;
    model->b=0;
    model->supvec[0]=0; /* element 0 reserved and empty for now */
    model->alpha[0]=0;
    model->totdoc=totdoc;
    model->kernel=kernel;
    model->sv_num=1;
    model->loo_error=-1;
    model->loo_recall=-1;
    model->loo_precision=-1;
    model->xa_error=-1;
    model->xa_recall=-1;
    model->xa_precision=-1;

    for(i=0;i<totdoc;i++)
    {
        /* various inits */
        inconsistent[i]=0;
        a[i]=0;
        lin[i]=0;

        if(label[i] > 0)
        {
            learn_parm->svm_cost[i]=learn_parm->svm_c*learn_parm->svm_costratio*
                fabs((float64_t)label[i]);
        }
        else if(label[i] < 0)
        {
            learn_parm->svm_cost[i]=learn_parm->svm_c*fabs((float64_t)label[i]);
        }
        else
            ASSERT(false);
    }

    if(verbosity==1)
    {
        SG_DEBUG("Optimizing...\n");
    }

    /* train the svm */
    SG_DEBUG("num_train: %d\n", totdoc);
    iterations=optimize_to_convergence(docs,label,totdoc,
            &shrink_state,inconsistent,a,lin,
            c,&timing_profile,
            &maxdiff,(int32_t)-1,
            (int32_t)1);

    if(verbosity>=1)
    {
        SG_DONE();
        SG_INFO("(%ld iterations)\n",iterations);
        SG_INFO("Optimization finished (maxdiff=%.8f).\n",maxdiff);
        SG_INFO("obj = %.16f, rho = %.16f\n",get_objective(),model->b);

        upsupvecnum=0;

        SG_DEBUG("num sv: %d\n", model->sv_num);
        for(i=1;i<model->sv_num;i++)
        {
            if(fabs(model->alpha[i]) >=
                    (learn_parm->svm_cost[model->supvec[i]]-learn_parm->epsilon_a))
                upsupvecnum++;
        }
        SG_INFO("Number of SV: %ld (including %ld at upper bound)\n",
                model->sv_num-1,upsupvecnum);
    }

    /* this makes sure the model we return does not contain pointers to the
       temporary documents */
    for(i=1;i<model->sv_num;i++)
    {
        j=model->supvec[i];
        if(j >= (totdoc/2))
        {
            j=totdoc-j-1;
        }
        model->supvec[i]=j;
    }

    shrink_state_cleanup(&shrink_state);
    SG_FREE(label);
    SG_FREE(inconsistent);
    SG_FREE(c);
    SG_FREE(a);
    SG_FREE(a_fullset);
    SG_FREE(xi_fullset);
    SG_FREE(lin);
    SG_FREE(learn_parm->svm_cost);
    SG_FREE(docs);
}
extern "C" void registerPlugin(CKernel &kernel) { kernel.getTestSuiteReductionPluginManager().addPlugin(new RandomReductionPlugin()); }