cv::Mat learndictionary::learn(const std::vector<std::string>& classes, int imagesPerClass, int clusterCentersPerClass) {
    // Learn a texton dictionary: cluster the filter-bank responses of every
    // class with k-means and stack each class's cluster centers vertically.
    cv::Mat dictionary;
    for (std::size_t classIdx = 0; classIdx < classes.size(); ++classIdx) {
        // Gather filter responses of all training images for this class.
        cv::Mat classResponses(0, FilterBank::getInstance().size(), CV_32F);
        const std::string classDir = dir + classes.at(classIdx);
        addResponsesForClass(classDir, imagesPerClass, classResponses);

        // Cluster the responses into clusterCentersPerClass centers
        // (5 k-means attempts with random initial centers).
        cv::Mat clusterLabels;
        cv::Mat classCenters = cv::Mat::zeros(clusterCentersPerClass, classResponses.cols, CV_32F);
        cv::kmeans(classResponses, clusterCentersPerClass, clusterLabels,
                   cv::TermCriteria(), 5, cv::KMEANS_RANDOM_CENTERS, classCenters);

        // Append this class's centers to the dictionary.
        if (dictionary.empty()) {
            dictionary = classCenters.clone();
        } else {
            cv::vconcat(dictionary, classCenters, dictionary);
        }
    }
    return dictionary;
}
/**
 * Train a digit classifier on randomly generated captcha images and report
 * its accuracy on a fresh random test set.
 *
 * @param clt       classifier type to instantiate
 * @param captcha   captcha generator; (*captcha)(digit) renders an image of the digit
 * @param train_vec number of training vectors to generate
 * @param test_num  number of test vectors to evaluate
 * @return trained classifier (caller owns it), or NULL on any failure
 */
CClassifier * train(EClassifierType clt, CCaptcha * captcha, unsigned train_vec, unsigned test_num)
{
    CClassifier * cl = NULL;

    try
    {
        throw_null((cl = new CClassifier(clt)));

        // Removed unused locals `v` and `tv` from the original.
        unsigned u, t, num, yes;
        Mat train_data(train_vec, dim, CV_32FC1);
        Mat responses(train_vec, 1, CV_32SC1);

        // Build the training set: one random digit image per row.
        for(u = 0; u < train_vec; u++)
        {
            num = rand() % 10;
            Mat img = (* captcha)(num);
            Mat vc = vec(img);

            for(t = 0; t < dim; t++)
                train_data.at<float>(u, t) = vc.at<float>(0, t);

            responses.at<int>(u, 0) = num;
        }

        cl->train(train_data, responses);

        // Evaluate on freshly generated digits and count correct predictions.
        for(u = 0, yes = 0; u < test_num; u++)
        {
            num = rand() % 10;
            Mat img = (* captcha)(num);
            Mat vc = vec(img);

            if(num == cl->predict(vc))
                yes++;
        }

        printf("Правильно классифицированных векторов: %u из %u (%f %%)\n", yes, test_num, yes * 100 / (double) test_num);
    }
    catch(...)
    {
        // Any failure yields NULL; delete on NULL is a safe no-op.
        delete cl;
        cl = NULL;
    }

    return cl;
}
void mexFunctionTrain(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if (nrhs != 4) { mexPrintf("Usage: model = LDARegStump(feats, responses, weights, numLevels)\n"); mexErrMsgTxt("Incorrect input format\n"); } if (nlhs != 1) mexErrMsgTxt("One output args expected"); #define mFeats (prhs[0]) #define mResponses (prhs[1]) #define mWeights (prhs[2]) #define mNumLevels (prhs[3]) MatlabInputMatrix<FeatsType> pFeats( mFeats, 0, 0, "feats" ); MatlabInputMatrix<WeightsType> pResponses( mResponses, pFeats.rows(), 1, "labels" ); MatlabInputMatrix<WeightsType> pWeights( mWeights, pFeats.rows(), 1, "labels" ); if ( mxGetClassID(mNumLevels) != mxUINT32_CLASS ) mexErrMsgTxt("numLevels must be UINT32"); if ( mxGetNumberOfElements(mNumLevels) != 1 ) mexErrMsgTxt("numLevels must be a scalar"); const unsigned maxDepth = ((unsigned int *)mxGetData(mNumLevels))[0]; RegTreeType tree; { // for now just copy the values Eigen::Map< const gFeatArrayType > feats( pFeats.data(), pFeats.rows(), pFeats.cols() ); Eigen::Map< const gResponseArrayType > responses( pResponses.data(), pResponses.rows() ); Eigen::Map< const gWeightsArrayType > weights( pWeights.data(), pWeights.rows() ); tree.learn( MatrixSampleIndexListType(feats), MatrixFeatureIndexListType(feats), MatrixFeatureValueObjectType(feats), MatrixResponseAndWeightsValueObject( responses, weights ), maxDepth); } plhs[0] = tree.saveToMatlab(); #undef mFeats #undef mResponses #undef mWeights }
// Print the command-line usage/help text to stderr and terminate the
// process with exit status 1. Never returns.
static void usage() { char msg[] = "Usage: [-l level] [-L lang] [-s gggg eeee] [-v] <tag> <mask> <test dir> <master dir> \n\ \n\ -l Set output log level (1-4, default is 1) \n\ -L Set language of responses (Japanese) \n\ -s Specify the tag of a sequence to use as part of key\n\ tag DICOM tag of attribute to use as key (gggg eeee)\n\ mask Name of mask file that specifies attributes to test \n\ test dir Directory containing test responses to evaluate \n\ master dir Directory containing gold standard responses"; cerr << msg << endl; ::exit(1); }
Vector NeuralNetwork::FeedForward( const Vector& input, const Vector& layerBias, const Matrix& layerWeights ) const
{
    // Compute one layer's activations: y_i = sigmoid( b_i + w_i . x ).
    const int layerSize = ( int )layerBias.size();
    Vector activations( layerSize );

    for( int i = 0; i < layerSize; ++i )
    {
        const Vector& nodeWeights = layerWeights[ i ];
        assert( nodeWeights.size() == input.size() );

        // Weighted sum of the inputs plus the node's bias: z = b + w * x
        const float z = layerBias[ i ] +
            std::inner_product( input.begin(), input.end(), nodeWeights.begin(), 0.0f );

        // Logistic sigmoid squashes z into (0, 1).
        activations[ i ] = 1.0f / ( 1.0f + expf( -z ) );
    }

    return activations;
}
int classifyOneAgainstAllSvm(vector<CvSVM> classifiers, Mat image) { /* Extract features */ Mat featureVector(FEAT_SIZE, 1, CV_32FC1); extractFeatures(image, featureVector); /* Responses from the classifiers */ vector<float> responses(classifiers.size()); for (int i = 0; i < classifiers.size(); i++) { responses[i] = classifiers[i].predict(featureVector, true); } /* Argmax of responses */ int argmax = 0; for (int i = 1; i < responses.size(); i++) { if (responses[i] > responses[argmax]) { argmax = i; } } /* Got a label or a rejection? */ return (responses[argmax] > 0) ? (argmax+1) : 0; }
bool SVMClassifier::train() { assert(trainData.size()); cv::Mat trainDataMat(size, length, CV_32FC1); cv::Mat responses(size, 1, CV_32FC1); for(size_t i = 0; i < size; ++i) { responses.at<float>(i,0) = trainData[i].label; for(size_t j = 0; j < length; ++j) { trainDataMat.at<float>(i,j) = trainData[i][j]; } } if(autoTrain) { return model.train_auto(trainDataMat, responses, cv::Mat(), cv::Mat(), params, kFold); } else { return model.train(trainDataMat, responses, cv::Mat(), cv::Mat(), params); } }
void pcl::DOTMOD:: detectTemplates (const std::vector<DOTModality*> & modalities, const float template_response_threshold, std::vector<DOTMODDetection> & detections, const size_t bin_size ) const { //std::cerr << ">> detectTemplates (...)" << std::endl; std::vector<QuantizedMap> maps; const size_t nr_modalities = modalities.size (); //std::cerr << "nr_modalities: " << nr_modalities << std::endl; for (size_t modality_index = 0; modality_index < nr_modalities; ++modality_index) { QuantizedMap &map = modalities[modality_index]->getDominantQuantizedMap (); maps.push_back(map); } //std::cerr << "1" << std::endl; const size_t width = maps[0].getWidth (); const size_t height = maps[0].getHeight (); const size_t nr_templates = templates_.size (); const size_t nr_template_horizontal_bins = template_width_ / bin_size; const size_t nr_template_vertical_bins = template_height_ / bin_size; //std::cerr << "---------------------------------------------------" << std::endl; //std::cerr << "width: " << width << std::endl; //std::cerr << "height: " << height << std::endl; //std::cerr << "nr_templates: " << nr_templates << std::endl; //std::cerr << "nr_template_horizontal_bins: " << nr_template_horizontal_bins << std::endl; //std::cerr << "nr_template_vertical_bins: " << nr_template_vertical_bins << std::endl; //std::cerr << "template_width_: " << template_width_ << std::endl; //std::cerr << "template_height_: " << template_height_ << std::endl; //std::cerr << "2" << std::endl; float best_response = 0.0f; for (size_t row_index = 0; row_index < (height - nr_template_vertical_bins); ++row_index) { for (size_t col_index = 0; col_index < (width - nr_template_horizontal_bins); ++col_index) { std::vector<float> responses (nr_templates, 0.0f); for (size_t modality_index = 0; modality_index < nr_modalities; ++modality_index) { const QuantizedMap map = maps[modality_index].getSubMap (col_index, row_index, nr_template_horizontal_bins, nr_template_vertical_bins); const unsigned char * 
image_data = map.getData (); for (size_t template_index = 0; template_index < nr_templates; ++template_index) { const unsigned char * template_data = &(templates_[template_index].modalities[modality_index].features[0]); for (size_t data_index = 0; data_index < (nr_template_horizontal_bins*nr_template_vertical_bins); ++data_index) { if ((image_data[data_index] & template_data[data_index]) != 0) responses[template_index] += 1.0f; } } } // find templates with response over threshold const float scaling_factor = 1.0f / (nr_template_horizontal_bins * nr_template_vertical_bins); for (size_t template_index = 0; template_index < nr_templates; ++template_index) { const float response = responses[template_index] * scaling_factor; if (response > template_response_threshold) { DOTMODDetection detection; detection.score = response; detection.template_id = template_index; detection.bin_x = col_index; detection.bin_y = row_index; detections.push_back (detection); } if (response > best_response) best_response = response; } } } //std::cerr << "best_response: " << best_response << std::endl; //std::cerr << "<< detectTemplates (...)" << std::endl; }
/**
 * Correlate HTTP requests and responses into per-flow HTTP records, then
 * join them against the connection set and write out the HTTP rows plus
 * request/response rows and cross-reference rows.
 *
 * Flow key: raw bytes of (protocol, ip, ip, port, port); requests are keyed
 * src-then-dst, responses dst-then-src so both directions of one flow map
 * to the same key.
 *
 * @return 0 on success, 1 on a reader error (_error/_errorMsg are set)
 */
int NeoflowHTTPCorrelator<RequestReaderType, ResponseReaderType, HTTPWriterType, HTTPRequestWriterType, HTTPResponseWriterType, HTTPRequestRefWriterType, HTTPResponseRefWriterType, ConnectionHTTPRefWriterType>
::run() {
    HTTPRequest http_request;
    HTTPResponse http_response;
    std::tr1::unordered_map<std::string, HTTP> http_map;
    uint8_t id_protocol;
    uint32_t id_src_ip;
    uint32_t id_dst_ip;
    uint16_t id_src_port;
    uint16_t id_dst_port;
    // NOTE: removed the unused locals `network` and `connection_search_http`
    // from the original.
    ConnectionSearchHTTP connectionSearchHTTP;
    ConnectionSearchHTTPRequest requestRow;
    ConnectionSearchHTTPResponse responseRow;
    ConnectionSearchHTTPRequestReference requestRef;
    ConnectionSearchHTTPResponseReference responseRef;
    ConnectionSearchConnectionHTTPReference connHTTPRef;
    ErrorStatus errorStatus;
    std::string httpFlowID;

    // Fill up the http_map from the request stream.
    while ((errorStatus = _request_reader->read(http_request)) == E_SUCCESS) {
        // Formulate the map key: protocol, src ip, dst ip, src port, dst port.
        id_protocol = http_request.protocol();
        httpFlowID.assign(reinterpret_cast<char *>(&id_protocol),
                          sizeof(id_protocol));
        id_src_ip = http_request.raw_source_ip();
        httpFlowID.append(reinterpret_cast<char *>(&id_src_ip),
                          sizeof(id_src_ip));
        id_dst_ip = http_request.raw_destination_ip();
        httpFlowID.append(reinterpret_cast<char *>(&id_dst_ip),
                          sizeof(id_dst_ip));
        id_src_port = http_request.raw_source_port();
        httpFlowID.append(reinterpret_cast<char *>(&id_src_port),
                          sizeof(id_src_port));
        id_dst_port = http_request.raw_destination_port();
        httpFlowID.append(reinterpret_cast<char *>(&id_dst_port),
                          sizeof(id_dst_port));

        // Check if there's an HTTP already in the map; if not, create one.
        std::tr1::unordered_map<std::string, HTTP>::iterator
            httpIt(http_map.find(httpFlowID));
        if (httpIt == http_map.end()) {
            httpIt = http_map.insert(std::make_pair(httpFlowID,
                         HTTP(id_protocol,
                              id_src_ip, id_dst_ip,
                              id_src_port, id_dst_port))).first;
        }

        // Add this http_request to the HTTP in the map.
        httpIt->second.add_request(http_request);
    }
    if (errorStatus != E_EOF) {
        _error = true;
        _errorMsg.assign("Reader: error reading http_request");
        return 1;
    }

    // Merge the response stream into the same map.
    while ((errorStatus = _response_reader->read(http_response)) == E_SUCCESS) {
        // Formulate the map key with the address order reversed
        // (a response's destination is the request's source).
        id_protocol = http_response.protocol();
        httpFlowID.assign(reinterpret_cast<char *>(&id_protocol),
                          sizeof(id_protocol));
        id_dst_ip = http_response.raw_destination_ip();
        httpFlowID.append(reinterpret_cast<char *>(&id_dst_ip),
                          sizeof(id_dst_ip));
        id_src_ip = http_response.raw_source_ip();
        httpFlowID.append(reinterpret_cast<char *>(&id_src_ip),
                          sizeof(id_src_ip));
        id_dst_port = http_response.raw_destination_port();
        httpFlowID.append(reinterpret_cast<char *>(&id_dst_port),
                          sizeof(id_dst_port));
        id_src_port = http_response.raw_source_port();
        httpFlowID.append(reinterpret_cast<char *>(&id_src_port),
                          sizeof(id_src_port));

        std::tr1::unordered_map<std::string, HTTP>::iterator
            httpIt(http_map.find(httpFlowID));
        // BUG FIX: the original inserted (and called add_response again)
        // unconditionally after the lookup, so a response whose flow was
        // already in the map got added twice. Insert only when missing.
        if (httpIt == http_map.end()) {
            httpIt = http_map.insert(std::make_pair(httpFlowID,
                         HTTP(id_protocol,
                              id_dst_ip, id_src_ip,
                              id_dst_port, id_src_port))).first;
        }

        // Add this http_response to the HTTP in the map.
        httpIt->second.add_response(http_response);
    }
    if (errorStatus != E_EOF) {
        _error = true;
        _errorMsg.assign("Reader: error reading response");
        return 1;
    }

    // For each Connection, look up its HTTP flow and emit the output rows.
    for (std::set<ConnectionSearchConnection>::const_iterator i(_set->begin());
         i != _set->end();
         ++i)
    {
        // Create the flowID (a-side as destination first).
        id_protocol = i->protocol();
        httpFlowID.assign(reinterpret_cast<char *>(&id_protocol),
                          sizeof(id_protocol));
        id_dst_ip = i->raw_ip_a();
        httpFlowID.append(reinterpret_cast<char *>(&id_dst_ip),
                          sizeof(id_dst_ip));
        id_src_ip = i->raw_ip_b();
        httpFlowID.append(reinterpret_cast<char *>(&id_src_ip),
                          sizeof(id_src_ip));
        id_dst_port = i->raw_port_a();
        httpFlowID.append(reinterpret_cast<char *>(&id_dst_port),
                          sizeof(id_dst_port));
        id_src_port = i->raw_port_b();
        httpFlowID.append(reinterpret_cast<char *>(&id_src_port),
                          sizeof(id_src_port));

        // Check for the flow ID.
        std::tr1::unordered_map<std::string, HTTP>::iterator
            httpIt(http_map.find(httpFlowID));
        if (httpIt == http_map.end()) {
            // If not found, try the reverse flowID (b-side first).
            httpFlowID.assign(reinterpret_cast<char *>(&id_protocol),
                              sizeof(id_protocol));
            id_dst_ip = i->raw_ip_b();
            httpFlowID.append(reinterpret_cast<char *>(&id_dst_ip),
                              sizeof(id_dst_ip));
            id_src_ip = i->raw_ip_a();
            httpFlowID.append(reinterpret_cast<char *>(&id_src_ip),
                              sizeof(id_src_ip));
            id_dst_port = i->raw_port_b();
            httpFlowID.append(reinterpret_cast<char *>(&id_dst_port),
                              sizeof(id_dst_port));
            id_src_port = i->raw_port_a();
            httpFlowID.append(reinterpret_cast<char *>(&id_src_port),
                              sizeof(id_src_port));
            httpIt = http_map.find(httpFlowID);
            if (httpIt == http_map.end()) {
                // No HTTP traffic for this connection.
                continue;
            }
        }

        // Found... write http and xref info.
        connectionSearchHTTP.http_id(_http_id);
        connectionSearchHTTP.start_time(httpIt->second.start_time());
        connectionSearchHTTP.end_time(httpIt->second.end_time());
        connectionSearchHTTP.protocol(httpIt->second.protocol());
        connectionSearchHTTP.client_ip(httpIt->second.client_ip());
        connectionSearchHTTP.server_ip(httpIt->second.server_ip());
        connectionSearchHTTP.client_port(httpIt->second.client_port());
        connectionSearchHTTP.server_port(httpIt->second.server_port());
        _http_writer->write(connectionSearchHTTP);

        connHTTPRef.http_id(_http_id);
        connHTTPRef.connection_id(i->connection_id());
        _conn_http_ref_writer->write(connHTTPRef);

        // Write the requests (const ref avoids a needless deep copy).
        const std::vector<HTTPRequest>& requests(httpIt->second.requests());
        requestRef.http_id(_http_id);
        for (std::vector<HTTPRequest>::const_iterator req(requests.begin());
             req != requests.end();
             ++req)
        {
            requestRow.http_request_id(_http_request_id);
            requestRow.time(req->time());
            requestRow.protocol(req->protocol());
            requestRow.source_ip(req->source_ip());
            requestRow.destination_ip(req->destination_ip());
            requestRow.source_port(req->source_port());
            requestRow.destination_port(req->destination_port());
            requestRow.type(req->type());
            requestRow.uri(req->uri());
            requestRow.version(req->version());
            requestRow.host(req->host());
            requestRow.user_agent(req->user_agent());
            requestRow.referer(req->referer());
            _request_writer->write(requestRow);

            requestRef.http_request_id(_http_request_id);
            _request_ref_writer->write(requestRef);

            ++_http_request_id;
        }

        // Write the responses.
        const std::vector<HTTPResponse>& responses(httpIt->second.responses());
        responseRef.http_id(_http_id);
        for (std::vector<HTTPResponse>::const_iterator res(responses.begin());
             res != responses.end();
             ++res)
        {
            responseRow.http_response_id(_http_response_id);
            responseRow.time(res->time());
            responseRow.protocol(res->protocol());
            responseRow.source_ip(res->source_ip());
            responseRow.destination_ip(res->destination_ip());
            responseRow.source_port(res->source_port());
            responseRow.destination_port(res->destination_port());
            responseRow.version(res->version());
            responseRow.status(res->status());
            responseRow.response(res->response());
            responseRow.reason(res->reason());
            responseRow.content_type(res->content_type());
            _response_writer->write(responseRow);

            responseRef.http_response_id(_http_response_id);
            _response_ref_writer->write(responseRef);

            ++_http_response_id;
        }

        ++_http_id;
    }

    return 0;
}
// Parameterized regression test for ANN_MLP training methods (RPROP /
// ANNEAL / ...): trains from a fixed initial-weight file on the waveform
// dataset, then checks that (a) the model round-trips through save/load
// with identical weights, and (b) its predictions match both the reloaded
// model and a stored "gold" response file, all to within FLT_EPSILON.
TEST_P(ML_ANN_METHOD, Test)
{
    // Test parameters: training-method id, its name (used in file names),
    // and the number of samples N to take from the dataset.
    int methodType = get<0>(GetParam());
    string methodName = get<1>(GetParam());
    int N = get<2>(GetParam());

    String folder = string(cvtest::TS::ptr()->get_data_path());
    String original_path = folder + "waveform.data";
    // Base name for all per-method fixture files.
    String dataname = folder + "waveform" + '_' + methodName;

    Ptr<TrainData> tdata2 = TrainData::loadFromCSV(original_path, 0);
    Mat samples = tdata2->getSamples()(Range(0, N), Range::all());
    // One-hot encode the 3-class labels into an N x 3 response matrix.
    Mat responses(N, 3, CV_32FC1, Scalar(0));
    for (int i = 0; i < N; i++)
        responses.at<float>(i, static_cast<int>(tdata2->getResponses().at<float>(i, 0))) = 1;

    Ptr<TrainData> tdata = TrainData::create(samples, ml::ROW_SAMPLE, responses);
    ASSERT_FALSE(tdata.empty()) << "Could not find test data file : " << original_path;

    // Reset the global RNG so the train/test split is deterministic.
    RNG& rng = theRNG();
    rng.state = 0;
    tdata->setTrainTestSplitRatio(0.8);
    Mat testSamples = tdata->getTestSamples();

#ifdef GENERATE_TESTDATA
    // Fixture generation: build a 2x30-hidden-unit network, run one RPROP
    // iteration, and store the resulting weights as the fixed starting
    // point for all methods.
    {
        Ptr<ml::ANN_MLP> xx = ml::ANN_MLP_ANNEAL::create();
        Mat_<int> layerSizesXX(1, 4);
        layerSizesXX(0, 0) = tdata->getNVars();
        layerSizesXX(0, 1) = 30;
        layerSizesXX(0, 2) = 30;
        layerSizesXX(0, 3) = tdata->getResponses().cols;
        xx->setLayerSizes(layerSizesXX);
        xx->setActivationFunction(ml::ANN_MLP::SIGMOID_SYM);
        xx->setTrainMethod(ml::ANN_MLP::RPROP);
        xx->setTermCriteria(TermCriteria(TermCriteria::COUNT, 1, 0.01));
        xx->train(tdata, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE);
        FileStorage fs;
        fs.open(dataname + "_init_weight.yml.gz", FileStorage::WRITE + FileStorage::BASE64);
        xx->write(fs);
        fs.release();
    }
#endif
    {
        // Load the stored initial weights and continue training with the
        // method under test.
        FileStorage fs;
        fs.open(dataname + "_init_weight.yml.gz", FileStorage::READ);
        Ptr<ml::ANN_MLP> x = ml::ANN_MLP_ANNEAL::create();
        x->read(fs.root());
        x->setTrainMethod(methodType);
        if (methodType == ml::ANN_MLP::ANNEAL)
        {
            // Fixed annealing schedule + seeded energy RNG for determinism.
            x->setAnnealEnergyRNG(RNG(CV_BIG_INT(0xffffffff)));
            x->setAnnealInitialT(12);
            x->setAnnealFinalT(0.15);
            x->setAnnealCoolingRatio(0.96);
            x->setAnnealItePerStep(11);
        }
        x->setTermCriteria(TermCriteria(TermCriteria::COUNT, 100, 0.01));
        // UPDATE_WEIGHTS: keep training from the loaded weights rather
        // than re-initializing.
        x->train(tdata, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE + ml::ANN_MLP::UPDATE_WEIGHTS);
        ASSERT_TRUE(x->isTrained()) << "Could not train networks with " << methodName;
        string filename = dataname + ".yml.gz";
        Mat r_gold;
#ifdef GENERATE_TESTDATA
        // Fixture generation: save the trained model and its predictions
        // as the gold reference.
        x->save(filename);
        x->predict(testSamples, r_gold);
        {
            FileStorage fs_response(dataname + "_response.yml.gz", FileStorage::WRITE + FileStorage::BASE64);
            fs_response << "response" << r_gold;
        }
#else
        {
            FileStorage fs_response(dataname + "_response.yml.gz", FileStorage::READ);
            fs_response["response"] >> r_gold;
        }
#endif
        ASSERT_FALSE(r_gold.empty());
        // Reload the saved model and verify every layer's weights match.
        Ptr<ml::ANN_MLP> y = Algorithm::load<ANN_MLP>(filename);
        ASSERT_TRUE(y != NULL) << "Could not load " << filename;
        Mat rx, ry;
        for (int j = 0; j < 4; j++)
        {
            rx = x->getWeights(j);
            ry = y->getWeights(j);
            double n = cvtest::norm(rx, ry, NORM_INF);
            EXPECT_LT(n, FLT_EPSILON) << "Weights are not equal for layer: " << j;
        }
        // Predictions must agree between the in-memory model, the reloaded
        // model, and the stored gold responses.
        x->predict(testSamples, rx);
        y->predict(testSamples, ry);
        double n = cvtest::norm(ry, rx, NORM_INF);
        EXPECT_LT(n, FLT_EPSILON) << "Predict are not equal to result of the saved model";
        n = cvtest::norm(r_gold, rx, NORM_INF);
        EXPECT_LT(n, FLT_EPSILON) << "Predict are not equal to 'gold' response";
    }
}