void online_test(const vector<vector<MatrixXd> >& vec_data, const VectorXi& vec_label, const vector<MatrixXd>& weight_0, const MatrixXd& bias_0, const vector<MatrixXd>& weight_1, const MatrixXd& bias_1, const vector<MatrixXd>& weight_2, const MatrixXd& bias_2, const MatrixXd& weight_class, const MatrixXd& bias_class, const int& num_kerns1, const int& num_kerns2, const int& num_kerns3, const int& kern_size, const int& pool_size) { vector<vector<MatrixXd> > test_data; copy(vec_data.begin(), vec_data.begin() + 1, back_inserter(test_data)); VectorXi test_label; test_label = vec_label.segment(0, 1200); ConvPoolLayer *p_conv0 = new ConvPoolLayer(test_data, num_kerns1, kern_size, "same", "tanh", weight_0, bias_0); Pool *p_pool0 = new Pool(p_conv0->batch_maps_activated, pool_size, pool_size); ConvPoolLayer *p_conv1 = new ConvPoolLayer(p_pool0->output_batch_pooled, num_kerns2, kern_size, "same", "tanh", weight_1, bias_1); Pool *p_pool1 = new Pool(p_conv1->batch_maps_activated, pool_size, pool_size); ConvPoolLayer *p_conv2 = new ConvPoolLayer(p_pool1->output_batch_pooled, num_kerns3, kern_size, "same", "tanh", weight_2, bias_2); Pool *p_pool2 = new Pool(p_conv2->batch_maps_activated, pool_size, pool_size); MatrixXd feature_vectors; get_feature_vector(p_pool2->output_batch_pooled, feature_vectors); // classifier.input = feature_vectors; Softmax classifier(feature_vectors, 9, test_label, 1, 1, weight_class, bias_class); classifier.calculation_output(); cout << classifier.m.transpose() << endl; int accuracy = 0; for(int i = 0; i < test_label.size(); i++) { if(classifier.m(i) == test_label(i)) { accuracy ++; } } cout << "accuracy : " << accuracy << endl; }
/** Element-wise equality check against another dense feature object.
 *
 * Returns false on the first differing dimension, vector length, or element;
 * true only when every feature vector matches exactly.
 *
 * @param rhs feature object to compare with (not modified)
 * @return true iff both objects hold identical feature matrices
 */
template<class ST> bool CDenseFeatures<ST>::is_equal(CDenseFeatures* rhs)
{
    if (num_features != rhs->num_features || num_vectors != rhs->num_vectors)
        return false;

    for (int32_t i = 0; i < num_vectors; i++)
    {
        int32_t v1len, v2len;
        bool v1free, v2free;
        ST* vec1 = get_feature_vector(i, v1len, v1free);
        ST* vec2 = rhs->get_feature_vector(i, v2len, v2free);

        bool mismatch = (v1len != v2len);

        // Only compare elements when the lengths agree: the original ran the
        // j-loop over v1len even on a length mismatch, reading vec2 out of
        // bounds whenever v2len < v1len.
        if (!mismatch)
        {
            for (int32_t j = 0; j < v1len; j++)
            {
                if (vec1[j] != vec2[j])
                {
                    mismatch = true;
                    break;
                }
            }
        }

        // Always release both vectors before returning.
        free_feature_vector(vec1, i, v1free);
        free_feature_vector(vec2, i, v2free);

        if (mismatch)
            return false;
    }

    return true;
}
/** Create an iterator over the non-zero entries of one feature vector.
 *
 * The caller must eventually pass the returned handle to the matching
 * free_feature_iterator (the iterator holds a feature vector reference).
 *
 * @param vector_index index of the vector to iterate; must be in
 *                     [0, get_num_vectors())
 * @return opaque dense_feature_iterator handle
 */
template<class ST> void* CDenseFeatures<ST>::get_feature_iterator(int32_t vector_index)
{
    // Reject negative indices too; the original only checked the upper bound,
    // so a negative index reached get_feature_vector() unvalidated.
    if (vector_index < 0 || vector_index >= get_num_vectors())
    {
        SG_ERROR("Index out of bounds (number of vectors %d, you "
                "requested %d)\n", get_num_vectors(), vector_index);
    }

    dense_feature_iterator* iterator = SG_MALLOC(dense_feature_iterator, 1);
    iterator->vec = get_feature_vector(vector_index, iterator->vlen, iterator->vfree);
    iterator->vidx = vector_index;
    iterator->index = 0;
    return iterator;
}
/** Fetch feature vector @p num as an SGVector.
 *
 * @param num index into the (possibly subset-restricted) feature set;
 *            must be in [0, get_num_vectors())
 * @return SGVector wrapping the vector data (ownership per do_free flag)
 */
template<class ST> SGVector<ST> CDenseFeatures<ST>::get_feature_vector(int32_t num)
{
    /* index conversion for subset, only for array access */
    int32_t real_num = m_subset_stack->subset_idx_conversion(num);

    if (num >= get_num_vectors())
    {
        // Report `num` — the index the caller actually requested. The
        // original printed real_num (the subset-converted index), which is
        // misleading when a subset is active.
        SG_ERROR("Index out of bounds (number of vectors %d, you "
                "requested %d)\n", get_num_vectors(), num);
    }

    int32_t vlen;
    bool do_free;
    ST* vector = get_feature_vector(num, vlen, do_free);
    return SGVector<ST>(vector, vlen, do_free);
}
/** float64_t specialization: dot product of stored vector @p vec_idx1 with a
 * dense buffer @p vec2 of length @p vec2_len (must equal num_features).
 * Delegates to the optimized SGVector dot kernel.
 */
template<> float64_t CDenseFeatures<float64_t>::dense_dot(
        int32_t vec_idx1, const float64_t* vec2, int32_t vec2_len)
{
    ASSERT(vec2_len == num_features);

    int32_t lhs_len;
    bool lhs_free;
    float64_t* lhs = get_feature_vector(vec_idx1, lhs_len, lhs_free);
    ASSERT(lhs_len == num_features);

    const float64_t dot_value =
            SGVector<float64_t>::dot(lhs, vec2, num_features);

    free_feature_vector(lhs, vec_idx1, lhs_free);
    return dot_value;
}
/** floatmax_t specialization: dot product of stored vector @p vec_idx1 with a
 * dense float64_t buffer. Accumulates element-wise (mixed-precision operands,
 * so the SGVector dot kernel is not applicable).
 */
template<> float64_t CDenseFeatures<floatmax_t>::dense_dot(
        int32_t vec_idx1, const float64_t* vec2, int32_t vec2_len)
{
    ASSERT(vec2_len == num_features);

    int32_t lhs_len;
    bool lhs_free;
    floatmax_t* lhs = get_feature_vector(vec_idx1, lhs_len, lhs_free);
    ASSERT(lhs_len == num_features);

    float64_t accum = 0;
    for (int32_t k = 0; k < num_features; k++)
        accum += lhs[k] * vec2[k];

    free_feature_vector(lhs, vec_idx1, lhs_free);
    return accum;
}
/** Build a newly allocated transposed copy of the feature matrix.
 *
 * @param[out] num_feat number of rows of the transpose (= current num vectors)
 * @param[out] num_vec  number of columns of the transpose (= num_features)
 * @return SG_MALLOC'd column-major buffer; caller owns and must free it
 */
template<class ST> ST* CDenseFeatures<ST>::get_transposed(int32_t &num_feat, int32_t &num_vec)
{
    const int32_t n_vectors = get_num_vectors();
    num_feat = n_vectors;
    num_vec = num_features;

    ST* transposed = SG_MALLOC(ST, int64_t(num_feat) * num_vec);

    // Scatter each source vector into a row of the transposed matrix.
    for (int32_t col = 0; col < n_vectors; col++)
    {
        SGVector<ST> src = get_feature_vector(col);
        for (int32_t row = 0; row < src.vlen; row++)
            transposed[row * int64_t(n_vectors) + col] = src.vector[row];
        free_feature_vector(src, col);
    }

    return transposed;
}
/** Dot product between vector @p vec_idx1 of this object and vector
 * @p vec_idx2 of another dense feature object of the same type.
 *
 * @param vec_idx1 index into this feature set
 * @param df other feature object (must match feature type/class)
 * @param vec_idx2 index into @p df
 * @return dot product as float64_t
 */
template<class ST> float64_t CDenseFeatures<ST>::dot(int32_t vec_idx1, CDotFeatures* df,
        int32_t vec_idx2)
{
    ASSERT(df);
    ASSERT(df->get_feature_type() == get_feature_type());
    ASSERT(df->get_feature_class() == get_feature_class());
    CDenseFeatures<ST>* sf = (CDenseFeatures<ST>*) df;

    int32_t len1, len2;
    bool free1, free2;

    ST* vec1 = get_feature_vector(vec_idx1, len1, free1);
    ST* vec2 = sf->get_feature_vector(vec_idx2, len2, free2);

    // The dot kernel reads len1 elements of BOTH buffers; without this check
    // a shorter vec2 would be read out of bounds (the dense_dot overloads
    // enforce the same invariant).
    ASSERT(len1 == len2);

    float64_t result = SGVector<ST>::dot(vec1, vec2, len1);

    free_feature_vector(vec1, vec_idx1, free1);
    sf->free_feature_vector(vec2, vec_idx2, free2);

    return result;
}
/** float64_t specialization: vec2 += alpha * vector[vec_idx1]
 * (or alpha * |vector[vec_idx1]| element-wise when @p abs_val).
 * Uses the optimized SGVector axpy kernel in the non-abs case.
 */
// The `template<>` prefix was missing on this explicit specialization
// definition — required by the language, and consistent with the other
// specializations in this file (see dense_dot above).
template<> void CDenseFeatures<float64_t>::add_to_dense_vec(float64_t alpha, int32_t vec_idx1,
        float64_t* vec2, int32_t vec2_len, bool abs_val)
{
    ASSERT(vec2_len == num_features);

    int32_t vlen;
    bool vfree;
    float64_t* vec1 = get_feature_vector(vec_idx1, vlen, vfree);
    ASSERT(vlen == num_features);

    if (abs_val)
    {
        for (int32_t i = 0; i < num_features; i++)
            vec2[i] += alpha * CMath::abs(vec1[i]);
    }
    else
    {
        SGVector<float64_t>::vec1_plus_scalar_times_vec2(vec2, alpha, vec1,
                num_features);
    }

    free_feature_vector(vec1, vec_idx1, vfree);
}
/** Generic version: vec2 += alpha * vector[vec_idx1]
 * (or alpha * |vector[vec_idx1]| element-wise when @p abs_val).
 */
template<class ST> void CDenseFeatures<ST>::add_to_dense_vec(float64_t alpha, int32_t vec_idx1,
        float64_t* vec2, int32_t vec2_len, bool abs_val)
{
    ASSERT(vec2_len == num_features);

    int32_t src_len;
    bool src_free;
    ST* src = get_feature_vector(vec_idx1, src_len, src_free);
    ASSERT(src_len == num_features);

    if (abs_val)
    {
        for (int32_t k = 0; k < num_features; k++)
            vec2[k] += alpha * CMath::abs(src[k]);
    }
    else
    {
        for (int32_t k = 0; k < num_features; k++)
            vec2[k] += alpha * src[k];
    }

    free_feature_vector(src, vec_idx1, src_free);
}