/* --------------------------------------------------------------
 Usage: exitflag = qpbsvm_cplex(m_UB, m_dim, m_tmax, m_tolabs,
                   m_tolrel, m_tolKKT, x, Nabla, &t, &History, verb)

 Solves the box-constrained QP (0 <= x[i] <= m_UB) with the CPLEX
 backend.  Unlike the iterative solvers in this file it does not
 track per-iteration history: *ptr_t is set to 0 and *ptr_History
 to NULL on return.  Always returns 0 (CPLEX status is not
 propagated to the caller).
-------------------------------------------------------------- */
int32_t CQPBSVMLib::qpbsvm_cplex(float64_t *x,
		float64_t *Nabla,
		int32_t   *ptr_t,
		float64_t **ptr_History,
		int32_t   verb)
{
	/* box constraints: 0 <= x[i] <= m_UB for every variable */
	float64_t* lb=new float64_t[m_dim];
	float64_t* ub=new float64_t[m_dim];

	for (int32_t i=0; i<m_dim; i++)
	{
		lb[i]=0;
		ub[i]=m_UB;
	}

	CCplex cplex;
	cplex.init(E_QP);
	/* linear term m_f with box bounds; no linear constraints (NULL, 0) */
	cplex.setup_lp(m_f, NULL, 0, m_dim, NULL, lb, ub);
	/* quadratic term 0.5 x' m_H x */
	cplex.setup_qp(m_H, m_dim);
	cplex.optimize(x);
	cplex.cleanup();

	delete[] lb;
	delete[] ub;

	*ptr_t=0;
	*ptr_History=NULL;
	return 0;
}
/** Train the linear programming machine (LPM).
 *
 * Sets up the LP over parameters [b, w+, w-, xi] (bias, positive and
 * negative weight parts, slacks) and solves it with CPLEX; the final
 * weight vector is w = w+ - w-.
 *
 * @param data optional training features; must have property FP_DOT
 * @return true if the solver reported success
 */
bool CLPM::train_machine(CFeatures* data)
{
	ASSERT(labels);
	if (data)
	{
		if (!data->has_property(FP_DOT))
			SG_ERROR("Specified features are not of type CDotFeatures\n");
		set_features((CDotFeatures*) data);
	}
	ASSERT(features);

	int32_t num_train_labels=labels->get_num_labels();
	int32_t num_feat=features->get_dim_feature_space();
	int32_t num_vec=features->get_num_vectors();
	ASSERT(num_vec==num_train_labels);

	SG_FREE(w);
	w=SG_MALLOC(float64_t, num_feat);
	w_dim=num_feat;

	int32_t num_params=1+2*num_feat+num_vec; //b,w+,w-,xi
	float64_t* params=SG_MALLOC(float64_t, num_params);
	memset(params,0,sizeof(float64_t)*num_params);

	CCplex solver;
	solver.init(E_LINEAR);
	SG_INFO("C=%f\n", C1);
	solver.setup_lpm(C1, (CSparseFeatures<float64_t>*) features, labels,
			get_bias_enabled());
	if (get_max_train_time()>0)
		solver.set_time_limit(get_max_train_time());
	bool result=solver.optimize(params);
	solver.cleanup();

	/* unpack solution: params = [bias | w+ (num_feat) | w- (num_feat) | xi] */
	set_bias(params[0]);
	for (int32_t i=0; i<num_feat; i++)
		w[i]=params[1+i]-params[1+num_feat+i];

//#define LPM_DEBUG
#ifdef LPM_DEBUG
	CMath::display_vector(params,num_params, "params");
	SG_PRINT("bias=%f\n", bias);
	CMath::display_vector(w,w_dim, "w");
	/* fixed mis-encoded "&para;ms" -> "&params" so this block compiles */
	CMath::display_vector(&params[1],w_dim, "w+");
	CMath::display_vector(&params[1+w_dim],w_dim, "w-");
#endif
	SG_FREE(params);

	return result;
}
/** Train LPBoost via column generation.
 *
 * Repeatedly finds the most violated constraint (the weak hypothesis /
 * feature dimension with maximal violation), adds it to the LP solved by
 * CPLEX, and re-optimizes the dual variables u.  Stops when no constraint
 * is violated beyond 1+epsilon (after at least two hypotheses), on user
 * cancel, or when the time budget is exhausted.  The primal weights w are
 * then recovered from the dual solution lambda.
 *
 * @param data unused here; features/labels must already be set
 * @return always true (solver failures are not propagated)
 */
bool CLPBoost::train(CFeatures* data)
{
	ASSERT(labels);
	ASSERT(features);
	int32_t num_train_labels=labels->get_num_labels();
	int32_t num_feat=features->get_dim_feature_space();
	int32_t num_vec=features->get_num_vectors();
	ASSERT(num_vec==num_train_labels);

	delete[] w;
	w=new float64_t[num_feat];
	memset(w,0,sizeof(float64_t)*num_feat);
	w_dim=num_feat;

	CCplex solver;
	solver.init(E_LINEAR);
	SG_PRINT("setting up lpboost\n");
	solver.setup_lpboost(C1, num_vec);
	SG_PRINT("finished setting up lpboost\n");

	float64_t result=init(num_vec);
	ASSERT(result);

	int32_t num_hypothesis=0;
	CTime time;
	CSignal::clear_cancel();

	while (!(CSignal::cancel_computations()))
	{
		int32_t max_dim=0;
		float64_t violator=find_max_violator(max_dim);
		SG_PRINT("iteration:%06d violator: %10.17f (>1.0) chosen: %d\n",
				num_hypothesis, violator, max_dim);
		if (violator <= 1.0+epsilon && num_hypothesis>1) //no constraint violated
		{
			SG_PRINT("converged after %d iterations!\n", num_hypothesis);
			break;
		}

		/* dimensions >= num_svec encode the negated copy of a feature */
		float64_t factor=+1.0;
		if (max_dim>=num_svec)
		{
			factor=-1.0;
			max_dim-=num_svec;
		}

		SGSparseVectorEntry<float64_t>* h=sfeat[max_dim].features;
		int32_t len=sfeat[max_dim].num_feat_entries;
		solver.add_lpboost_constraint(factor, h, len, num_vec, labels);
		solver.optimize(u);
		//CMath::display_vector(u, num_vec, "u");
		num_hypothesis++;

		if (get_max_train_time()>0 && time.cur_time_diff()>get_max_train_time())
			break;
	}

	/* final solve also returns the dual variables lambda of the
	 * generated constraints, one per selected hypothesis */
	float64_t* lambda=new float64_t[num_hypothesis];
	solver.optimize(u, lambda);

	//CMath::display_vector(lambda, num_hypothesis, "lambda");
	for (int32_t i=0; i<num_hypothesis; i++)
	{
		int32_t d=dim->get_element(i);
		if (d>=num_svec) /* negated copy contributes with flipped sign */
			w[d-num_svec]+=lambda[i];
		else
			w[d]-=lambda[i];
	}
	delete[] lambda; /* fix: was leaked on every call */
	//solver.write_problem("problem.lp");
	solver.cleanup();
	cleanup();

	return true;
}