CSGObject *CGaussianKernel::shallow_copy() const
{
	// TODO: remove this after all the classes get shallow_copy properly implemented
	// Guard: a subclass of CGaussianKernel must not accidentally fall through
	// to this base implementation, which only knows base-class state.
	ASSERT(typeid(*this) == typeid(CGaussianKernel))

	// Clone with the same cache size and width; if this kernel was already
	// initialized on features, initialize the clone on the same feature pair.
	CGaussianKernel* clone=new CGaussianKernel(cache_size, width);
	if (lhs)
		clone->init(lhs, rhs);

	return clone;
}
void fImgSvm::test_libsvm() { init_shogun(&print_message); index_t num_vec=imgvec.size(); index_t num_feat=SIFTN; index_t num_class=2; // create some data SGMatrix<float64_t> matrix(num_feat, num_vec); for(int i = 0 ; i < num_vec ; i ++ ) { for(int j = 0 ; j < num_feat ; j ++ ) { matrix(j,i) = imgvec[i][j]; } } //SGVector<float64_t>::range_fill_vector(matrix.matrix, num_feat*num_vec); // create vectors // shogun will now own the matrix created CDenseFeatures<float64_t>* features=new CDenseFeatures<float64_t>(matrix); // create three labels CMulticlassLabels* labels=new CMulticlassLabels(num_vec); for (index_t i=0; i<num_vec; ++i) labels->set_label(i,imgtrainlabelvec[i]); // create gaussian kernel with cache 10MB, width 0.5 CGaussianKernel* kernel = new CGaussianKernel(10, 0.5); kernel->init(features, features); // create libsvm with C=10 and train CMulticlassLibSVM* svm = new CMulticlassLibSVM(10, kernel, labels); svm->train(); // classify on training examples CMulticlassLabels* output=CMulticlassLabels::obtain_from_generic(svm->apply()); SGVector<float64_t>::display_vector(output->get_labels().vector, output->get_num_labels(), "初始的 output"); /* assert that batch apply and apply(index_t) give same result */ for (index_t i=0; i<output->get_num_labels(); ++i) { float64_t label=svm->apply_one(i); SG_SPRINT("result output[%d]=%f\n", i, label); ASSERT(output->get_label(i)==label); } SG_UNREF(output); // free up memory SG_UNREF(svm); exit_shogun(); }
int main() { const int32_t feature_cache=0; const int32_t kernel_cache=0; const float64_t rbf_width=10; const float64_t svm_C=10; const float64_t svm_eps=0.001; init_shogun(); gen_rand_data(); // create train labels CLabels* labels=new CLabels(SGVector<float64_t>(lab, NUM)); SG_REF(labels); // create train features CSimpleFeatures<float64_t>* features = new CSimpleFeatures<float64_t>(feature_cache); SG_REF(features); features->set_feature_matrix(feat, DIMS, NUM); // create gaussian kernel CGaussianKernel* kernel = new CGaussianKernel(kernel_cache, rbf_width); SG_REF(kernel); kernel->init(features, features); // create svm via libsvm and train CLibSVM* svm = new CLibSVM(svm_C, kernel, labels); SG_REF(svm); svm->set_epsilon(svm_eps); svm->train(); printf("num_sv:%d b:%f\n", svm->get_num_support_vectors(), svm->get_bias()); // classify + display output CLabels* out_labels=svm->apply(); for (int32_t i=0; i<NUM; i++) printf("out[%d]=%f\n", i, out_labels->get_label(i)); SG_UNREF(labels); SG_UNREF(out_labels); SG_UNREF(kernel); SG_UNREF(features); SG_UNREF(svm); exit_shogun(); return 0; }
int main(int argc, char** argv) { init_shogun(&print_message); index_t num_vec=300; index_t num_feat=20; index_t num_class=3; // create some data SGMatrix<float64_t> matrix(num_feat, num_vec); SGVector<float64_t>::range_fill_vector(matrix.matrix, num_feat*num_vec); // create vectors // shogun will now own the matrix created CDenseFeatures<float64_t>* features=new CDenseFeatures<float64_t>(matrix); // create three labels CMulticlassLabels* labels=new CMulticlassLabels(num_vec); for (index_t i=0; i<num_vec; ++i) labels->set_label(i, i%num_class); // create gaussian kernel with cache 10MB, width 0.5 CGaussianKernel* kernel = new CGaussianKernel(10, 0.5); kernel->init(features, features); // create libsvm with C=10 and train CMulticlassLibSVM* svm = new CMulticlassLibSVM(10, kernel, labels); svm->train(); // classify on training examples CMulticlassLabels* output=CMulticlassLabels::obtain_from_generic(svm->apply()); SGVector<float64_t>::display_vector(output->get_labels().vector, output->get_num_labels(), "batch output"); /* assert that batch apply and apply(index_t) give same result */ for (index_t i=0; i<output->get_num_labels(); ++i) { float64_t label=svm->apply_one(i); SG_SPRINT("single output[%d]=%f\n", i, label); ASSERT(output->get_label(i)==label); } SG_UNREF(output); // free up memory SG_UNREF(svm); exit_shogun(); return 0; }
int main(int argc, char** argv) { init_shogun(&print_message); // create some data float64_t* matrix = new float64_t[6]; for (int32_t i=0; i<6; i++) matrix[i]=i; // create three 2-dimensional vectors // shogun will now own the matrix created CSimpleFeatures<float64_t>* features= new CSimpleFeatures<float64_t>(); features->set_feature_matrix(matrix, 2, 3); // create three labels CLabels* labels=new CLabels(3); labels->set_label(0, -1); labels->set_label(1, +1); labels->set_label(2, -1); // create gaussian kernel with cache 10MB, width 0.5 CGaussianKernel* kernel = new CGaussianKernel(10, 0.5); kernel->init(features, features); // create libsvm with C=10 and train CLibSVM* svm = new CLibSVM(10, kernel, labels); svm->train(); // classify on training examples for (int32_t i=0; i<3; i++) SG_SPRINT("output[%d]=%f\n", i, svm->classify_example(i)); // free up memory SG_UNREF(svm); exit_shogun(); return 0; }
int main(int argc, char** argv) { init_shogun(&print_message); /* create some random data */ SGMatrix<float64_t> matrix(n,n); for(int32_t i=0; i<n*n; ++i) matrix.matrix[i]=CMath::random((float64_t)-n,(float64_t)n); CMath::display_matrix(matrix.matrix, n, n); /* create n n-dimensional feature vectors */ CDenseFeatures<float64_t>* features= new CDenseFeatures<float64_t>(matrix); /* create gaussian kernel with cache 10MB, width will be changed later */ CGaussianKernel* kernel = new CGaussianKernel(10, 0); kernel->init(features, features); /* create n labels (+1,-1,+1,-1,...) */ CLabels* labels=new CLabels(n); for (int32_t i=0; i<n; ++i) labels->set_label(i, i%2==0 ? +1 : -1); /* create libsvm with C=10 and produced labels */ CLibSVM* svm=new CLibSVM(10, kernel, labels); /* iterate over different width parameters */ for (int32_t i=0; i<10; ++i) { SG_SPRINT("\n\ncurrent kernel width: 2^%d=%f\n", i, CMath::pow(2.0,i)); float64_t width=CMath::pow(2.0,i); /* create parameter to change current kernel width */ Parameter* param=new Parameter(); param->add(&width, "width", ""); /* tell kernel to use the newly produced parameter */ kernel->m_parameters->set_from_parameters(param); /* print kernel matrix */ for (int32_t i=0; i<n; i++) { for (int32_t j=0; j<n; j++) SG_SPRINT("%f ", kernel->kernel(i,j)); SG_SPRINT("\n"); } /* train and classify */ svm->train(); for (int32_t i=0; i<n; ++i) SG_SPRINT("output[%d]=%f\treal[%d]=%f\n", i, svm->apply(i), i, labels->get_label(i)); delete param; } /* free up memory */ SG_UNREF(svm); exit_shogun(); return 0; }