bool fImgSvm::kmeans(SGMatrix<float64_t>& data,
        CDenseFeatures<float64_t>*& centers, int32_t num_features)
{
    init_shogun(&print_message);

    int32_t num_clusters=mwordnum;
    int32_t dim_features=SIFTN;
    float64_t cluster_std_dev=2.0;

    /* build random cluster centers */
    SGMatrix<float64_t> cluster_centers(dim_features, num_clusters);
    SGVector<float64_t>::random_vector(cluster_centers.matrix,
            dim_features*num_clusters, 0, 20.0);
    //SGMatrix<float64_t>::display_matrix(cluster_centers.matrix,
    //        cluster_centers.num_rows, cluster_centers.num_cols, "cluster centers");

    /* create features, SG_REF to avoid deletion */
    CDenseFeatures<float64_t>* features=new CDenseFeatures<float64_t>();
    features->set_feature_matrix(data);
    SG_REF(features);

    /* create labels for cluster centers */
    CMulticlassLabels* labels=new CMulticlassLabels(num_features);
    for (index_t i=0; i<num_features; ++i)
        labels->set_label(i, 0);

    /* create distance */
    CEuclideanDistance* distance=new CEuclideanDistance(features, features);

    /* create distance machine and run k-means */
    CKMeans* clustering=new CKMeans(num_clusters, distance);
    clustering->train(features);

    /* build clusters */
    //CMulticlassLabels* result=CMulticlassLabels::obtain_from_generic(clustering->apply());
    //for (index_t i=0; i<result->get_num_labels(); ++i)
    //    SG_SPRINT("cluster index of vector %i: %f\n", i, result->get_label(i));

    /* fetch cluster centers (after training they are stored as the distance's lhs) */
    centers=(CDenseFeatures<float64_t>*)distance->get_lhs();
    SGMatrix<float64_t> centers_matrix=centers->get_feature_matrix();

    //SG_UNREF(result);
    SG_UNREF(centers);
    SG_UNREF(clustering);
    SG_UNREF(labels);
    SG_UNREF(features);
    exit_shogun();

    return true;
}
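The code above assumes a printing callback for init_shogun() and a few fImgSvm members that are declared elsewhere. Below is a minimal, hypothetical sketch of that callback (the one commonly used in Shogun's libshogun examples) and of how kmeans() might be invoked; the descriptor matrix and my_fimgsvm object are placeholders, not part of the original code.

// Minimal sketch (assumption): the message callback handed to init_shogun()
// simply forwards Shogun's output to the given stream.
// Typically requires <shogun/base/init.h> and <cstdio>.
void print_message(FILE* target, const char* str)
{
    fprintf(target, "%s", str);
}

// Hypothetical call site: cluster the stacked SIFT descriptors and receive
// the k-means centers back through the reference parameter.
// SGMatrix<float64_t> descriptors(SIFTN, num_descriptors);  // filled elsewhere
// CDenseFeatures<float64_t>* centers=NULL;
// my_fimgsvm.kmeans(descriptors, centers, num_descriptors);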
// Excerpt from Shogun's static interface shutdown code (pasted here verbatim);
// the leading #endif closes a preprocessor guard that precedes this excerpt in
// the original source file.
void R_unload_sg(DllInfo *info)
#endif
{
#ifdef HAVE_PYTHON
    CPythonInterface::run_python_exit();
#endif
#ifdef HAVE_OCTAVE
    COctaveInterface::run_octave_exit();
#endif
    exit_shogun();
}
void fImgSvm::test_libsvm()
{
    init_shogun(&print_message);

    index_t num_vec=imgvec.size();
    index_t num_feat=SIFTN;
    index_t num_class=2;

    // create the training data matrix (one column per image vector)
    SGMatrix<float64_t> matrix(num_feat, num_vec);
    for (int i=0; i<num_vec; i++)
    {
        for (int j=0; j<num_feat; j++)
            matrix(j, i)=imgvec[i][j];
    }
    //SGVector<float64_t>::range_fill_vector(matrix.matrix, num_feat*num_vec);

    // create dense features; shogun will now own the matrix created
    CDenseFeatures<float64_t>* features=new CDenseFeatures<float64_t>(matrix);

    // create labels, one per training vector
    CMulticlassLabels* labels=new CMulticlassLabels(num_vec);
    for (index_t i=0; i<num_vec; ++i)
        labels->set_label(i, imgtrainlabelvec[i]);

    // create gaussian kernel with cache 10MB, width 0.5
    CGaussianKernel* kernel=new CGaussianKernel(10, 0.5);
    kernel->init(features, features);

    // create multiclass libsvm with C=10 and train
    CMulticlassLibSVM* svm=new CMulticlassLibSVM(10, kernel, labels);
    svm->train();

    // classify on training examples
    CMulticlassLabels* output=CMulticlassLabels::obtain_from_generic(svm->apply());
    SGVector<float64_t>::display_vector(output->get_labels().vector,
            output->get_num_labels(), "initial output");

    /* assert that batch apply and apply(index_t) give same result */
    for (index_t i=0; i<output->get_num_labels(); ++i)
    {
        float64_t label=svm->apply_one(i);
        SG_SPRINT("result output[%d]=%f\n", i, label);
        ASSERT(output->get_label(i)==label);
    }
    SG_UNREF(output);

    // free up memory
    SG_UNREF(svm);
    exit_shogun();
}
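test_libsvm() only prints the predicted labels. If a quantitative check is wanted, a small helper like the hypothetical training_accuracy() sketched below could compare the batch predictions with the ground-truth labels created above; the helper is not part of the original class and only uses calls already present in the code.

// Hypothetical helper: fraction of training vectors whose predicted
// multiclass label matches the ground truth.
static float64_t training_accuracy(CMulticlassLabels* output,
        CMulticlassLabels* labels)
{
    int32_t correct=0;
    for (index_t i=0; i<output->get_num_labels(); ++i)
    {
        if (output->get_label(i)==labels->get_label(i))
            correct++;
    }
    return correct*1.0/output->get_num_labels();
}

// Possible usage inside test_libsvm(), before SG_UNREF(output):
// SG_SPRINT("training accuracy: %f\n", training_accuracy(output, labels));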
void fImgSvm::test_libsvm2()
{
    init_shogun(&print_message);

    const int32_t feature_cache=0;
    const int32_t kernel_cache=0;
    const float64_t rbf_width=10;
    const float64_t svm_C=10;
    const float64_t svm_eps=0.001;

    int32_t num=mtrainimgsum;
    int32_t dims=SIFTN;
    float64_t dist=0.5;

    SGVector<float64_t> lab(num);          // labels
    SGMatrix<float64_t> feat(dims, num);
    //gen_rand_data(lab, feat, dist);
    for (int i=0; i<num; i++)
    {
        for (int j=0; j<dims; j++)
            feat(j, i)=imgvec[i][j];
    }
    // map the two image classes to the binary labels -1/+1
    for (int i=0; i<num; i++)
    {
        //lab[i]=imglabelvec[i]*1.0;
        if (imgtrainlabelvec[i]==1)
            lab[i]=-1.0;
        else
            lab[i]=1.0;
    }

    // create train labels
    CLabels* labels=new CBinaryLabels(lab);

    // create train features
    CDenseFeatures<float64_t>* features=new CDenseFeatures<float64_t>(feature_cache);
    SG_REF(features);
    features->set_feature_matrix(feat);

    // create gaussian kernel
    CGaussianKernel* kernel=new CGaussianKernel(kernel_cache, rbf_width);
    SG_REF(kernel);
    kernel->init(features, features);

    // create svm via libsvm and train
    CLibSVM* svm=new CLibSVM(svm_C, kernel, labels);
    SG_REF(svm);
    svm->set_epsilon(svm_eps);
    svm->train();

    SG_SPRINT("num_sv:%d b:%f\n", svm->get_num_support_vectors(), svm->get_bias());

    // classify + display output on the training set
    CBinaryLabels* out_labels=CBinaryLabels::obtain_from_generic(svm->apply());
    for (int32_t i=0; i<num; i++)
    {
        SG_SPRINT("out[%d]=%f (%f)\n", i,
                out_labels->get_label(i), out_labels->get_confidence(i));
    }

    CBinaryLabels* result=CBinaryLabels::obtain_from_generic(svm->apply(features));
    for (int32_t i=0; i<3; i++)
        SG_SPRINT("output[%d]=%f\n", i, result->get_label(i));

    // predict on the held-out test images
    printf("----------------test -----------------\n");
    getTestImg(imgtestvec);
    int32_t testnum=mtestingsum;
    SGMatrix<float64_t> testfeat(dims, testnum);
    for (int i=0; i<testnum; i++)
    {
        for (int j=0; j<dims; j++)
            testfeat(j, i)=imgtestvec[i][j];
    }
    CDenseFeatures<float64_t>* testfeatures=new CDenseFeatures<float64_t>(feature_cache);
    SG_REF(testfeatures);
    testfeatures->set_feature_matrix(testfeat);

    CBinaryLabels* testresult=CBinaryLabels::obtain_from_generic(svm->apply(testfeatures));

    // count correct predictions per class (class 1 was mapped to -1, class 2 to +1)
    int32_t rightnum1=0;
    int32_t rightsum1=0;
    int32_t rightnum2=0;
    for (int32_t i=0; i<testnum; i++)
    {
        SG_SPRINT("output[%d]=%f\n", i, testresult->get_label(i));
        if (imgtestlabelvec[i]==1)
        {
            if (testresult->get_label(i)<0.0)
                rightnum1++;
            rightsum1++;
        }
        else if (imgtestlabelvec[i]==2 && testresult->get_label(i)>0.0)
        {
            rightnum2++;
        }
    }
    printf("overall : %lf\n", (rightnum1+rightnum2)*1.0/testnum);
    printf("class 1 : %lf\n", rightnum1*1.0/rightsum1);
    printf("class 2 : %lf\n", rightnum2*1.0/(testnum-rightsum1));

    SG_UNREF(out_labels);
    SG_UNREF(kernel);
    SG_UNREF(features);
    SG_UNREF(svm);
    exit_shogun();
}
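The member variables and helpers used throughout (imgvec, imgtrainlabelvec, getTestImg(), and so on) are declared elsewhere and are not part of the posted code. The skeleton below is only an assumption about how fImgSvm might be declared, with SIFTN taken to be the SIFT descriptor dimension.

// Hypothetical class skeleton inferred from the member names used above;
// the real declaration is not shown in the post.
#include <vector>

#define SIFTN 128   // assumed: SIFT descriptor dimension

class fImgSvm
{
public:
    bool kmeans(SGMatrix<float64_t>& data,
            CDenseFeatures<float64_t>*& centers, int32_t num_features);
    void test_libsvm();
    void test_libsvm2();

private:
    void getTestImg(std::vector<std::vector<float64_t> >& vec); // assumed: loads test descriptors

    int32_t mwordnum;       // number of k-means clusters (visual words)
    int32_t mtrainimgsum;   // number of training feature vectors
    int32_t mtestingsum;    // number of test feature vectors

    std::vector<std::vector<float64_t> > imgvec;      // training SIFT descriptors
    std::vector<std::vector<float64_t> > imgtestvec;  // test SIFT descriptors
    std::vector<int32_t> imgtrainlabelvec;            // training labels (1 or 2)
    std::vector<int32_t> imgtestlabelvec;             // test labels (1 or 2)
};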