/* --------------------------------------------------------------
 Usage: exitflag = qpbsvm_prloqo(m_UB, m_dim, m_tmax, m_tolabs,
   m_tolrel, m_tolKKT, x, Nabla, &t, &History, verb )
-------------------------------------------------------------- */
int32_t CQPBSVMLib::qpbsvm_cplex(float64_t *x,
		float64_t *Nabla,
		int32_t   *ptr_t,
		float64_t **ptr_History,
		int32_t   verb)
{
	/* Solve the box-constrained QP min 0.5*x'Hx + f'x subject to
	 * 0 <= x_i <= m_UB by delegating to CPLEX.
	 *
	 * x           - in/out solution vector (length m_dim)
	 * Nabla, verb - unused by this backend
	 * ptr_t       - set to 0 (no iteration count available from CPLEX)
	 * ptr_History - set to NULL (no per-iteration trace available)
	 * returns 0 unconditionally.
	 *
	 * Allocation switched from raw new[]/delete[] to SG_MALLOC/SG_FREE
	 * for consistency with the rest of the codebase. */
	float64_t* lb=SG_MALLOC(float64_t, m_dim);
	float64_t* ub=SG_MALLOC(float64_t, m_dim);

	// box constraints: 0 <= x_i <= m_UB for every variable
	for (int32_t i=0; i<m_dim; i++)
	{
		lb[i]=0;
		ub[i]=m_UB;
	}

	CCplex cplex;
	cplex.init(E_QP);
	cplex.setup_lp(m_f, NULL, 0, m_dim, NULL, lb, ub);
	cplex.setup_qp(m_H, m_dim);
	cplex.optimize(x);
	cplex.cleanup();

	SG_FREE(lb);
	SG_FREE(ub);

	// CPLEX does not expose an iteration history through this interface
	*ptr_t=0;
	*ptr_History=NULL;
	return 0;
}
bool CLPM::train_machine(CFeatures* data)
{
	/* Train the Linear Programming Machine: build an LP over the
	 * stacked parameter vector [b, w+, w-, xi] (bias, positive and
	 * negative weight parts, slacks), solve it with CPLEX, and
	 * recover the weight vector as w = w+ - w-.
	 *
	 * data - optional dot features to train on; replaces the current
	 *        features when given.
	 * returns the solver's success flag. */
	ASSERT(labels);
	if (data)
	{
		if (!data->has_property(FP_DOT))
			SG_ERROR("Specified features are not of type CDotFeatures\n");
		set_features((CDotFeatures*) data);
	}
	ASSERT(features);
	int32_t num_train_labels=labels->get_num_labels();
	int32_t num_feat=features->get_dim_feature_space();
	int32_t num_vec=features->get_num_vectors();
	ASSERT(num_vec==num_train_labels);

	SG_FREE(w);
	w=SG_MALLOC(float64_t, num_feat);
	w_dim=num_feat;

	int32_t num_params=1+2*num_feat+num_vec; //b,w+,w-,xi
	float64_t* params=SG_MALLOC(float64_t, num_params);
	memset(params,0,sizeof(float64_t)*num_params);

	CCplex solver;
	solver.init(E_LINEAR);
	SG_INFO("C=%f\n", C1);
	solver.setup_lpm(C1, (CSparseFeatures<float64_t>*) features, labels, get_bias_enabled());
	if (get_max_train_time()>0)
		solver.set_time_limit(get_max_train_time());
	bool result=solver.optimize(params);
	solver.cleanup();

	// unpack the LP solution: bias first, then w = w+ - w-
	set_bias(params[0]);
	for (int32_t i=0; i<num_feat; i++)
		w[i]=params[1+i]-params[1+num_feat+i];

//#define LPM_DEBUG
#ifdef LPM_DEBUG
	CMath::display_vector(params,num_params, "params");
	SG_PRINT("bias=%f\n", bias);
	CMath::display_vector(w,w_dim, "w");
	// fix: '&params' had been corrupted to '¶ms' (HTML &para; mojibake),
	// which would not compile with LPM_DEBUG enabled
	CMath::display_vector(&params[1],w_dim, "w+");
	CMath::display_vector(&params[1+w_dim],w_dim, "w-");
#endif

	SG_FREE(params);
	return result;
}
bool CLPBoost::train(CFeatures* data)
{
	/* LPBoost training: repeatedly find the most violated constraint
	 * (a feature column, possibly negated), add it to an LP solved by
	 * CPLEX, and stop once no constraint is violated by more than
	 * epsilon (or the time limit / a cancel signal hits). The final
	 * dual weights lambda are then folded into the weight vector w.
	 *
	 * data - unused here; training runs on the already-set features.
	 * returns true unconditionally (errors abort via ASSERT/signals). */
	ASSERT(labels);
	ASSERT(features);
	int32_t num_train_labels=labels->get_num_labels();
	int32_t num_feat=features->get_dim_feature_space();
	int32_t num_vec=features->get_num_vectors();

	ASSERT(num_vec==num_train_labels);
	delete[] w;
	w=new float64_t[num_feat];
	memset(w,0,sizeof(float64_t)*num_feat);
	w_dim=num_feat;

	CCplex solver;
	solver.init(E_LINEAR);
	SG_PRINT("setting up lpboost\n");
	solver.setup_lpboost(C1, num_vec);
	SG_PRINT("finished setting up lpboost\n");

	float64_t result=init(num_vec);
	ASSERT(result);
	int32_t num_hypothesis=0;
	CTime time;
	CSignal::clear_cancel();

	while (!(CSignal::cancel_computations()))
	{
		int32_t max_dim=0;
		float64_t violator=find_max_violator(max_dim);
		SG_PRINT("iteration:%06d violator: %10.17f (>1.0) chosen: %d\n", num_hypothesis, violator, max_dim);
		if (violator <= 1.0+epsilon && num_hypothesis>1) //no constraint violated
		{
			SG_PRINT("converged after %d iterations!\n", num_hypothesis);
			break;
		}

		// indices >= num_svec encode the negated copy of a column
		float64_t factor=+1.0;
		if (max_dim>=num_svec)
		{
			factor=-1.0;
			max_dim-=num_svec;
		}

		SGSparseVectorEntry<float64_t>* h=sfeat[max_dim].features;
		int32_t len=sfeat[max_dim].num_feat_entries;
		solver.add_lpboost_constraint(factor, h, len, num_vec, labels);
		solver.optimize(u);
		//CMath::display_vector(u, num_vec, "u");
		num_hypothesis++;

		if (get_max_train_time()>0 && time.cur_time_diff()>get_max_train_time())
			break;
	}

	// final solve also returns the dual weights of the added hypotheses
	float64_t* lambda=new float64_t[num_hypothesis];
	solver.optimize(u, lambda);

	//CMath::display_vector(lambda, num_hypothesis, "lambda");
	for (int32_t i=0; i<num_hypothesis; i++)
	{
		int32_t d=dim->get_element(i);
		if (d>=num_svec)
			w[d-num_svec]+=lambda[i];
		else
			w[d]-=lambda[i];
	}
	//solver.write_problem("problem.lp");

	// fix: lambda was leaked — release it once folded into w
	delete[] lambda;

	solver.cleanup();
	cleanup();

	return true;
}
bool CCPLEXSVM::train_machine(CFeatures* data)
{
	/* Train a binary SVM by preparing the dual QP (hessian y'y.*K with
	 * box constraints 0 <= alpha_i <= C1) for CPLEX and extracting the
	 * support vectors from the resulting alphas.
	 *
	 * NOTE(review): no cplex.setup_qp()/optimize() call is visible in
	 * this block, so alphas are never actually computed — the SV scan
	 * below finds nothing. Confirm against the original implementation;
	 * this edit only fixes the broken ASSERT and the uninitialized read.
	 *
	 * data - optional training features; re-initializes the kernel.
	 * returns true when CPLEX initialized and setup completed. */
	ASSERT(m_labels);
	ASSERT(m_labels->get_label_type() == LT_BINARY);
	bool result = false;
	CCplex cplex;

	if (data)
	{
		if (m_labels->get_num_labels() != data->get_num_vectors())
			SG_ERROR("Number of training vectors does not match number of labels\n");
		kernel->init(data, data);
	}

	if (cplex.init(E_QP))
	{
		int32_t n,m;
		SGVector<float64_t> y=((CBinaryLabels*)m_labels)->get_labels();

		// fix: num_label was initialized to 0 and never assigned, so the
		// ASSERT below could never hold for a non-empty kernel matrix;
		// take the label count from the label vector itself
		int32_t num_label=y.vlen;

		SGMatrix<float64_t> H=kernel->get_kernel_matrix();
		m=H.num_rows;
		n=H.num_cols;
		ASSERT(n>0 && n==m && n==num_label);

		// fix: zero-initialize alphas — they were read below (alphas[i]>0)
		// without ever being written, an uninitialized-memory read
		float64_t* alphas=SG_CALLOC(float64_t, n);
		float64_t* lb=SG_MALLOC(float64_t, n);
		float64_t* ub=SG_MALLOC(float64_t, n);

		//hessian y'y.*K
		for (int32_t i=0; i<n; i++)
		{
			lb[i]=0;
			ub[i]=get_C1();

			for (int32_t j=0; j<n; j++)
				H[i*n+j]*=y[j]*y[i];
		}

		//feed qp to cplex

		// collect support vectors: every strictly positive alpha,
		// signed by its label
		int32_t j=0;
		for (int32_t i=0; i<n; i++)
		{
			if (alphas[i]>0)
			{
				//set_alpha(j, alphas[i]*labels->get_label(i)/etas[1]);
				set_alpha(j, alphas[i]*((CBinaryLabels*) m_labels)->get_int_label(i));
				set_support_vector(j, i);
				j++;
			}
		}
		//compute_objective();
		SG_INFO( "obj = %.16f, rho = %.16f\n",get_objective(),get_bias());
		SG_INFO( "Number of SV: %ld\n", get_num_support_vectors());

		SG_FREE(alphas);
		SG_FREE(lb);
		SG_FREE(ub);

		result = true;
	}

	if (!result)
		SG_ERROR( "cplex svm failed");

	return result;
}