// Reads a Lowe-style SIFT .key file and appends one 2D point per keypoint
// to featureP, translated by the offset (px, py).
//
// File layout (ASCII): header "<numKeys> <descriptorLength>", then per key
// "<y> <x> <orientation> <scale>" followed by <descriptorLength> integers.
// Note the key file stores y before x.
//
// featureP : output vector; points are appended, existing contents kept.
// px, py   : offsets subtracted from the raw (x, y) coordinates.
void Key_Reader::readFile(vector<cv::Point2f>& featureP, double px, double py)
{
    const char* filePath = this->getFilePath();

    // RAII stream; previously a failed open() was silently ignored and the
    // header read produced garbage counts.
    ifstream inputFile(filePath);
    if (!inputFile.is_open()) {
        return;
    }

    int numKeys = 0;
    int descLength = 0;
    inputFile >> numKeys >> descLength;

    for (int i = 0; i < numKeys; i++) {
        float x, y, orientation, scale;
        // Key file is y, x apparently.
        inputFile >> y >> x >> orientation >> scale;

        // Skip the descriptor values; only the 2D location is used here.
        int discard;
        for (int j = 0; j < descLength; j++) {
            inputFile >> discard;
        }

        // Make the 2D feature point and add it to the output vector.
        featureP.push_back(cv::Point2f((x - px), (y - py)));
    }
    // inputFile closes automatically on destruction.
}
// Reads a key file and appends raw (untranslated) 2D feature points.
// Equivalent to readFile(featureP, 0, 0); kept for callers that do not
// supply a principal-point offset.
void Key_Reader::readFile(vector<cv::Point2f>& featureP)
{
    // Delegate to the offset-aware overload with a zero offset so the two
    // overloads cannot drift apart (the previous copy carried a stale
    // "FIX need to implement using POS input" note).
    this->readFile(featureP, 0.0, 0.0);
}
// REF [file] >> ${WAFFLES_HOME}/demos/hello_ml/src/main.cpp.
// Demonstrates the Waffles supervised-learning API: builds a tiny hard-coded
// "pizza" dataset, then trains several model types and prints each model's
// prediction (taste and cost) for a single test vector.
void ml_example()
{
    // Define the feature attributes (or columns)
    // Per GMatrix convention: 0 = continuous, n > 0 = categorical with n values.
    std::vector<size_t> feature_values;
    feature_values.push_back(0); // diameter = continuous
    feature_values.push_back(3); // crust_type = { thin_crust=0, Chicago_style_deep_dish=1, Neapolitan=2 }
    feature_values.push_back(2); // meatiness = { vegan=0, meaty=1 }
    feature_values.push_back(4); // presentation = { dine_in=0, take_out=1, delivery=2, frozen=3 }

    // Define the label attributes (or columns)
    std::vector<size_t> label_values;
    label_values.push_back(2); // taste = { lousy=0, delicious=1 }
    label_values.push_back(0); // cost = continuous

    // Make some contrived hard-coded training data (12 rows).
    GClasses::GMatrix feat(feature_values);
    feat.newRows(12);
    GClasses::GMatrix lab(label_values);
    lab.newRows(12);

    //          diameter            crust            meatiness        presentation          taste            cost
    feat[0][0] = 14.0; feat[0][1] = 1; feat[0][2] = 1; feat[0][3] = 0; lab[0][0] = 1; lab[0][1] = 22.95;
    feat[1][0] = 12.0; feat[1][1] = 0; feat[1][2] = 0; feat[1][3] = 3; lab[1][0] = 0; lab[1][1] = 3.29;
    feat[2][0] = 14.0; feat[2][1] = 1; feat[2][2] = 1; feat[2][3] = 2; lab[2][0] = 1; lab[2][1] = 15.49;
    feat[3][0] = 12.0; feat[3][1] = 2; feat[3][2] = 0; feat[3][3] = 0; lab[3][0] = 1; lab[3][1] = 16.65;
    feat[4][0] = 18.0; feat[4][1] = 1; feat[4][2] = 1; feat[4][3] = 3; lab[4][0] = 0; lab[4][1] = 9.99;
    feat[5][0] = 14.0; feat[5][1] = 1; feat[5][2] = 1; feat[5][3] = 0; lab[5][0] = 1; lab[5][1] = 14.49;
    feat[6][0] = 12.0; feat[6][1] = 2; feat[6][2] = 0; feat[6][3] = 2; lab[6][0] = 1; lab[6][1] = 19.65;
    feat[7][0] = 14.0; feat[7][1] = 0; feat[7][2] = 1; feat[7][3] = 1; lab[7][0] = 0; lab[7][1] = 6.99;
    feat[8][0] = 14.0; feat[8][1] = 1; feat[8][2] = 1; feat[8][3] = 2; lab[8][0] = 1; lab[8][1] = 19.95;
    feat[9][0] = 14.0; feat[9][1] = 2; feat[9][2] = 0; feat[9][3] = 3; lab[9][0] = 0; lab[9][1] = 12.99;
    feat[10][0] = 16.0; feat[10][1] = 0; feat[10][2] = 1; feat[10][3] = 0; lab[10][0] = 0; lab[10][1] = 12.20;
    feat[11][0] = 14.0; feat[11][1] = 1; feat[11][2] = 1; feat[11][3] = 1; lab[11][0] = 1; lab[11][1] = 15.01;

    // Make a test vector: 4 feature inputs, 2 label outputs.
    GClasses::GVec test_features(4);
    GClasses::GVec predicted_labels(2);

    std::cout << "This demo trains and tests several supervised learning models using some contrived hard-coded training data to predict the tastiness and cost of a pizza.\n\n";

    // 15" Neapolitan, vegan, dine-in (see attribute encodings above).
    test_features[0] = 15.0;
    test_features[1] = 2;
    test_features[2] = 0;
    test_features[3] = 0;
    std::cout << "Predicting labels for a 15 inch pizza with a Neapolitan-style crust, no meat, for dine-in.\n\n";

    // Use several models to make predictions; each local:: helper trains on
    // (feat, lab) and fills predicted_labels for test_features.
    std::cout.precision(4);
    local::decision_tree(feat, lab, test_features, predicted_labels);
    std::cout << "The decision tree predicts the taste is " << (predicted_labels[0] == 0 ? "lousy" : "delicious") << ", and the cost is $" << predicted_labels[1] << std::endl;

    local::neural_network(feat, lab, test_features, predicted_labels);
    std::cout << "The neural network predicts the taste is " << (predicted_labels[0] == 0 ? "lousy" : "delicious") << ", and the cost is $" << predicted_labels[1] << std::endl;

    local::knn(feat, lab, test_features, predicted_labels);
    std::cout << "The knn model predicts the taste is " << (predicted_labels[0] == 0 ? "lousy" : "delicious") << ", and the cost is $" << predicted_labels[1] << std::endl;

    local::naivebayes(feat, lab, test_features, predicted_labels);
    std::cout << "The naive Bayes model predicts the taste is " << (predicted_labels[0] == 0 ? "lousy" : "delicious") << ", and the cost is $" << predicted_labels[1] << std::endl;

    local::ensemble(feat, lab, test_features, predicted_labels);
    std::cout << "Random forest predicts the taste is " << (predicted_labels[0] == 0 ? "lousy" : "delicious") << ", and the cost is $" << predicted_labels[1] << std::endl;
}
/*
 * UINT nxbmax = xbmax+2*padx,nybmax=ybmax+2*pady; vector<REAL> feat(nxbmax*nybmax*norient,0); UINT counter=0, offset = (padx + pady * nxbmax) * norient,coffset,roffset; for(UINT i= 0; i < ybmax; ++i) { roffset = offset + i * nxbmax* norient; for(UINT j=0; j < xbmax;++j) { coffset = roffset + j*norient; for(UINT k=0; k < norient;++k) feat[coffset+k]=hogvalue[counter++]; } } xbmax = nxbmax; ybmax=nybmax; hogvalue=feat;*/
// Pads the LBP map with a border of -1 cells: padx columns on the left and
// right, pady rows on the top and bottom. Copies the existing map into the
// interior of the enlarged canvas and updates nxpoints/nypoints/lbpmap.
void LBPMap::PadFeatureMap(UINT padx, UINT pady)
{
    const UINT paddedWidth = nxpoints + 2 * padx;
    const UINT paddedHeight = nypoints + 2 * pady;

    // Start from an all -1 canvas; only interior cells get real values.
    vector<int> padded(paddedWidth * paddedHeight, -1);

    UINT src = 0;
    for (UINT row = 0; row < nypoints; ++row) {
        // Index of the first interior cell of this row in the padded layout:
        // padx + (pady + row) * paddedWidth.
        const UINT dst = padx + (pady + row) * paddedWidth;
        for (UINT col = 0; col < nxpoints; ++col) {
            padded[dst + col] = lbpmap[src++];
        }
    }

    nxpoints = paddedWidth;
    nypoints = paddedHeight;
    lbpmap = padded;
}
void M_Feature::AddRow(vector<double> vals, double score) { assert(vals.size() == f_dim); V_Feature feat(f_dim); feat.UpdateScore(score); feat.byte = 1; for (int i=0; i < f_dim; i++) { feat.Updatefeature(vals[i], i); } feat.id = num_instance; num_instance++; MatF.push_back(feat); }
// set ordered feats // and return the list of features to be deleted std::vector<int> ObservationManager::preprocessFeats(const std::vector<int>& prev_feats_idx, const int max_feats, const std::vector<cv::Rect> &targets) { std::vector<int> deleted_feats; std::vector<int> current_feat_idx; std::vector<float> responses; std::vector<cv::Point2f> feat_pts; feat_tracker_->get_features(time_sec_, feat_pts, responses, current_feat_idx); for(size_t i = 0; i < feat_pts.size(); i++) { feat_pts[i].y += floor(img_mono_.rows / 2); } std::vector<cv::Rect> dets = getDetections(); std::vector<cv::Rect> bbs; for(size_t i = 0; i < targets.size(); i++) { bbs.push_back(targets[i]); bbs.push_back(targets[i]); } for(size_t i = 0; i < dets.size(); i++) { bbs.push_back(dets[i]); bbs.push_back(dets[i]); } cv::groupRectangles(bbs, 1, 0.2); gfeats_.clear(); // clear all features; gfeats_idx_.clear(); // clear all features; // find matches int n = 0; for(size_t i = 0; i < prev_feats_idx.size(); i++) { std::vector<int>::iterator it; it = std::find(current_feat_idx.begin(), current_feat_idx.end(), prev_feats_idx[i]); // if found.. if(*it == prev_feats_idx[i]) { // add the features.. int idx = (int)(it - current_feat_idx.begin()); float x = feat_pts[idx].x; float y = feat_pts[idx].y; if(!in_any_rect(bbs, cv::Point2f(x, y))) { cv::Point2f feat(x, y); gfeats_.push_back(feat); gfeats_idx_.push_back(*it); n++; } else { deleted_feats.push_back((int)i); } // remove already selected features.. current_feat_idx.erase(current_feat_idx.begin() + idx); feat_pts.erase(feat_pts.begin() + idx); responses.erase(responses.begin() + idx); } else { deleted_feats.push_back((int)i); } if(n >= max_feats) break; } // add new features!!! 
while(n < max_feats && current_feat_idx.size() > 0) { std::vector<float>::iterator it; it = std::max_element(responses.begin(), responses.end()); int idx = (int)(it - responses.begin()); // std::cout << "selecting " << idx << ":" << responses[idx] << "=" << *it << std::endl; float x = feat_pts[idx].x; float y = feat_pts[idx].y; if(!in_any_rect(bbs, cv::Point2f(x, y))) { cv::Point2f feat(x, y); gfeats_.push_back(feat); gfeats_idx_.push_back(current_feat_idx[idx]); n++; } // remove already selected features.. current_feat_idx.erase(current_feat_idx.begin() + idx); feat_pts.erase(feat_pts.begin() + idx); responses.erase(responses.begin() + idx); } assert(current_feat_idx.size() == feat_pts.size()); assert(responses.size() == feat_pts.size()); return deleted_feats; }
// Copies the result of a PostGIS query into a new SQLite table plus an
// R-tree spatial index (mapnik's sqlite wrapper). Geometry is transferred
// as WKB via AsBinary(); rows whose geometry is empty/invalid are skipped.
//
// conn               : open PostgreSQL connection (project wrapper type).
// query              : source SQL; wrapped in "select * from (...) as query".
// output_table_name  : destination SQLite table (also names the rtree index).
// output_filename    : destination SQLite database file.
//
// NOTE(review): table/column names and the query are concatenated directly
// into SQL strings — safe only for trusted input; confirm callers.
void pgsql2sqlite(Connection conn, std::string const& query, std::string const& output_table_name, std::string const& output_filename)
{
    namespace sqlite = mapnik::sqlite;
    sqlite::database db(output_filename);

    // Run the query with "limit 0" just to discover the column list.
    boost::shared_ptr<ResultSet> rs = conn->executeQuery("select * from (" + query + ") as query limit 0;");
    int count = rs->getNumFields();

    std::ostringstream select_sql;
    select_sql << "select ";
    for (int i=0; i<count; ++i)
    {
        if (i!=0) select_sql << ",";
        select_sql << "\"" << rs->getFieldName(i) << "\"";
    }
    select_sql << " from (" << query << ") as query";

    // Split an optional "schema.table" prefix out of the parsed table name.
    std::string table_name = mapnik::sql_utils::table_from_sql(query);
    std::string schema_name="";
    std::string::size_type idx=table_name.find_last_of('.');
    if (idx!=std::string::npos)
    {
        schema_name=table_name.substr(0,idx);
        table_name=table_name.substr(idx+1);
    }
    else
    {
        table_name=table_name.substr(0);
    }

    // Look up the geometry column, SRID and type from geometry_columns.
    std::ostringstream geom_col_sql;
    geom_col_sql << "select f_geometry_column,srid,type from geometry_columns ";
    geom_col_sql << "where f_table_name='" << table_name << "'";
    if (schema_name.length() > 0)
    {
        geom_col_sql <<" and f_table_schema='"<< schema_name <<"'";
    }

    rs = conn->executeQuery(geom_col_sql.str());

    int srid = -1;
    std::string geom_col = "UNKNOWN";
    std::string geom_type = "UNKNOWN";

    if ( rs->next())
    {
        try
        {
            srid = boost::lexical_cast<int>(rs->getValue("srid"));
        }
        catch (boost::bad_lexical_cast &ex)
        {
            // Keep srid = -1 on parse failure; conversion continues.
            std::clog << ex.what() << std::endl;
        }
        geom_col = rs->getValue("f_geometry_column");
        geom_type = rs->getValue("type");
    }

    // add AsBinary(<geometry_column>) modifier
    std::string select_sql_str = select_sql.str();
    boost::algorithm::replace_all(select_sql_str, "\"" + geom_col + "\"","AsBinary(" + geom_col+") as " + geom_col);

#ifdef MAPNIK_DEBUG
    std::cout << select_sql_str << "\n";
#endif

    // Stream rows through a server-side binary cursor (10000 rows per fetch)
    // instead of materialising the whole result set.
    std::ostringstream cursor_sql;
    std::string cursor_name("my_cursor");
    cursor_sql << "DECLARE " << cursor_name << " BINARY INSENSITIVE NO SCROLL CURSOR WITH HOLD FOR " << select_sql_str << " FOR READ ONLY";
    conn->execute(cursor_sql.str());

    boost::shared_ptr<CursorResultSet> cursor(new CursorResultSet(conn,cursor_name,10000));

    unsigned num_fields = cursor->getNumFields();
    if (num_fields == 0) return;

    std::string feature_id = "fid";

    std::ostringstream create_sql;
    create_sql << "create table if not exists " << output_table_name << " (" << feature_id << " INTEGER PRIMARY KEY AUTOINCREMENT,";

    int geometry_oid = -1;

    std::string output_table_insert_sql = "insert into " + output_table_name + " values (?";

    context_ptr ctx = boost::make_shared<context_type>();

    // Map each PostgreSQL column to a SQLite column type by its type OID:
    // 20/21/23 (int8/int2/int4) -> INTEGER, 700/701 (float4/float8) -> REAL,
    // everything else -> TEXT; the geometry column becomes a BLOB.
    for ( unsigned pos = 0; pos < num_fields ; ++pos)
    {
        const char* field_name = cursor->getFieldName(pos);
        ctx->push(field_name);

        if (pos > 0) { create_sql << ","; }
        output_table_insert_sql +=",?";

        int oid = cursor->getTypeOID(pos);
        if (geom_col == cursor->getFieldName(pos))
        {
            geometry_oid = oid;
            create_sql << "'" << cursor->getFieldName(pos) << "' BLOB";
        }
        else
        {
            create_sql << "'" << cursor->getFieldName(pos);
            switch (oid)
            {
            case 20:
            case 21:
            case 23:
                create_sql << "' INTEGER";
                break;
            case 700:
            case 701:
                create_sql << "' REAL";
                break;
            default:
                create_sql << "' TEXT";
                break;
            }
        }
    }

    create_sql << ");";
    output_table_insert_sql +=")";

    std::cout << "client_encoding=" << conn->client_encoding() << "\n";
    std::cout << "geometry_column=" << geom_col << "(" << geom_type << ") srid=" << srid << " oid=" << geometry_oid << "\n";

    db.execute("begin;");
    // output table sql
    db.execute(create_sql.str());

    // spatial index sql (SQLite rtree virtual table keyed by pkid)
    std::string spatial_index_sql = "create virtual table idx_" + output_table_name + "_" + geom_col + " using rtree(pkid, xmin, xmax, ymin, ymax)";
    db.execute(spatial_index_sql);

    //blob_to_hex hex;
    int pkid = 0;

    std::string spatial_index_insert_sql = "insert into idx_" + output_table_name + "_" + geom_col + " values (?,?,?,?,?)" ;
    sqlite::prepared_statement spatial_index(db,spatial_index_insert_sql);

#ifdef MAPNIK_DEBUG
    std::cout << output_table_insert_sql << "\n";
#endif
    sqlite::prepared_statement output_table(db,output_table_insert_sql);

    while (cursor->next())
    {
        ++pkid;

        sqlite::record_type output_rec;
        output_rec.push_back(sqlite::value_type(pkid));

        bool empty_geom = true;
        const char * buf = 0;

        // Decode each field from PostgreSQL binary wire format by OID:
        // 25/1042/1043 = text/char/varchar, 23 = int4, 21 = int2,
        // 700 = float4, 701 = float8, 1700 = numeric.
        for (unsigned pos=0 ; pos < num_fields; ++pos)
        {
            if (! cursor->isNull(pos))
            {
                int size=cursor->getFieldLength(pos);
                int oid = cursor->getTypeOID(pos);
                buf=cursor->getValue(pos);

                switch (oid)
                {
                case 25:
                case 1042:
                case 1043:
                {
                    // Escape single quotes for SQLite text.
                    std::string text(buf);
                    boost::algorithm::replace_all(text,"'","''");
                    output_rec.push_back(sqlite::value_type(text));
                    break;
                }
                case 23:
                    output_rec.push_back(sqlite::value_type(int4net(buf)));
                    break;
                case 21:
                    output_rec.push_back(sqlite::value_type(int2net(buf)));
                    break;
                case 700:
                {
                    float val;
                    float4net(val,buf);
                    output_rec.push_back(sqlite::value_type(val));
                    break;
                }
                case 701:
                {
                    double val;
                    float8net(val,buf);
                    output_rec.push_back(sqlite::value_type(val));
                    break;
                }
                case 1700:
                {
                    std::string str = mapnik::sql_utils::numeric2string(buf);
                    try
                    {
                        double val = boost::lexical_cast<double>(str);
                        output_rec.push_back(sqlite::value_type(val));
                    }
                    catch (boost::bad_lexical_cast & ex)
                    {
                        // NOTE(review): on parse failure nothing is pushed,
                        // shifting subsequent values left in this row.
                        std::clog << ex.what() << "\n";
                    }
                    break;
                }
                default:
                {
                    if (oid == geometry_oid)
                    {
                        // Parse WKB to compute the bounding box for the
                        // rtree entry; store the raw WKB blob in the row.
                        mapnik::Feature feat(ctx,pkid);
                        geometry_utils::from_wkb(feat.paths(),buf,size,wkbGeneric);
                        if (feat.num_geometries() > 0)
                        {
                            geometry_type const& geom=feat.get_geometry(0);
                            box2d<double> bbox = geom.envelope();
                            if (bbox.valid())
                            {
                                sqlite::record_type rec;
                                rec.push_back(sqlite::value_type(pkid));
                                rec.push_back(sqlite::value_type(bbox.minx()));
                                rec.push_back(sqlite::value_type(bbox.maxx()));
                                rec.push_back(sqlite::value_type(bbox.miny()));
                                rec.push_back(sqlite::value_type(bbox.maxy()));
                                spatial_index.insert_record(rec);
                                empty_geom = false;
                            }
                        }
                        output_rec.push_back(sqlite::blob(buf,size));
                    }
                    else
                    {
                        // NOTE(review): unknown non-geometry OIDs are written
                        // as NULL — silent data loss; confirm intended.
                        output_rec.push_back(sqlite::null_type());
                    }
                    break;
                }
                }
            }
            else
            {
                output_rec.push_back(sqlite::null_type());
            }
        }

        // Rows without a valid geometry bbox are dropped entirely.
        if (!empty_geom) output_table.insert_record(output_rec);

        if (pkid % 1000 == 0)
        {
            std::cout << "\r processing " << pkid << " features";
            std::cout.flush();
        }
        // Periodic commit keeps the SQLite transaction from growing unbounded.
        if (pkid % 100000 == 0)
        {
            db.execute("commit;begin;");
        }
    }
    // commit
    db.execute("commit;");
    std::cout << "\r processed " << pkid << " features";
    db.execute("VACUUM;");
    std::cout << "\r vacumming";
    std::cout << "\n Done!" << std::endl;
}
// Parses one GDB feature definition (a JSON dictionary) and appends the
// resulting GDBFeature to _features.
//
// Required key: "identifier". Optional: "filename", "osabi", and "contents"
// — an array of "set-type:set-name" strings where set-type is one of
// "flag-sets", "gdb-vector-set" or "register-sets", resolved against ctx.
//
// Returns false on a missing identifier, a malformed contents entry, or an
// unresolved set name; index is used only for error messages.
bool GDBDefinitions::parseFeature(Context &ctx, size_t index, JSDictionary const *d) {
    auto ident = d->value<JSString>("identifier");
    if (ident == nullptr) {
        fprintf(stderr, "error: GDB feature definition #%zu does "
                "not specify mandatory identifier key\n", index);
        return false;
    }

    auto filename = d->value<JSString>("filename");
    auto contents = d->value<JSArray>("contents");
    auto osabi = d->value<JSString>("osabi");

    GDBFeature::shared_ptr feat(new GDBFeature);
    feat->Identifier = ident->value();
    if (filename != nullptr) {
        feat->FileName = filename->value();
    }
    if (osabi != nullptr) {
        feat->OSABI = osabi->value();
    }

    if (contents != nullptr) {
        for (size_t n = 0; n < contents->count(); n++) {
            auto typval = contents->value<JSString>(n);
            if (typval == nullptr) {
                fprintf(stderr, "error: GDB feature definition #%zu "
                        "for entry #%zu does not specify a valid "
                        "type\n", index, n);
                return false;
            }

            // Split "set-type:set-name"; both halves must be non-empty.
            std::string type, name;
            type = typval->value();
            size_t namepos = type.find(':');
            if (namepos == std::string::npos || namepos + 1 >= type.length()) {
                fprintf(stderr, "error: GDB feature definition #%zu "
                        "for entry #%zu specifies an invalid reference '%s', "
                        "the format is set-type:set-name\n", index, n, type.c_str());
                return false;
            }

            name = type.substr(namepos + 1);
            type = type.substr(0, namepos);

            if (type == "flag-sets") {
                auto flagset = ctx.FlagSets.find(name);
                if (flagset == ctx.FlagSets.end()) {
                    fprintf(stderr, "error: GDB feature definition #%zu "
                            "specifies unknown flag set '%s'\n", index, name.c_str());
                    return false;
                } else {
                    GDBFeatureEntry::shared_ptr fe(new GDBFeatureEntry);
                    fe->Type = GDBFeatureEntry::kTypeFlagSet;
                    fe->Set.Flag = flagset->second;
                    feat->Entries.push_back(fe);
                }
            } else if (type == "gdb-vector-set") {
                // Vector sets are stored in a sequence, so resolve by linear
                // scan on the set name.
                bool found = false;
                for (auto vec : ctx.GDBVectorSet) {
                    if (vec->Name == name) {
                        GDBFeatureEntry::shared_ptr fe(new GDBFeatureEntry);
                        fe->Type = GDBFeatureEntry::kTypeVectorSet;
                        fe->Set.Vector = vec;
                        feat->Entries.push_back(fe);
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    fprintf(stderr, "error: GDB feature definition #%zu "
                            "specifies unknown vector set '%s'\n", index, name.c_str());
                    return false;
                }
            } else if (type == "register-sets") {
                auto regset = ctx.RegisterSets.find(name);
                if (regset == ctx.RegisterSets.end()) {
                    fprintf(stderr, "error: GDB feature definition #%zu "
                            "specifies unknown register set '%s'\n", index, name.c_str());
                    return false;
                } else {
                    GDBFeatureEntry::shared_ptr fe(new GDBFeatureEntry);
                    fe->Type = GDBFeatureEntry::kTypeRegisterSet;
                    fe->Set.Register = regset->second;
                    feat->Entries.push_back(fe);
                }
            } else {
                // NOTE(review): unlike the other error paths, an unknown
                // set-type only warns and the entry is skipped (no return
                // false) — confirm this is intentional.
                fprintf(stderr, "error: GDB feature definition #%zu "
                        "for entry '%s' specifies an unknown type '%s'\n",
                        index, name.c_str(), type.c_str());
            }
        }
    }

    _features.push_back(feat);
    return true;
}
// las2ogr entry point: converts a LAS point cloud into an OGR vector dataset
// (default: ESRI Shapefile), one wkbPoint25D feature per LAS point.
//
// Options: -i <input.las>  -o <output>  [-f <ogr-format>]  -h  -formats
// Returns 0 on success, -1 on any error. Requires HAVE_GDAL at build time.
int main(int argc, char* argv[])
{
    int rc = 0;

#ifdef HAVE_GDAL
    try
    {
        ::OGRRegisterAll();

        // Parse command-line options
        std::string in_file;
        std::string out_file;
        std::string out_frmt;
        {
            int on_arg = 1;
            while (on_arg < argc)
            {
                std::string arg(argv[on_arg]);
                if (arg == "-h")
                {
                    usage();
                    return 0;
                }
                else if (arg == "-formats")
                {
                    report_ogr_formats(std::cout);
                    return 0;
                }
                else if (arg == "-i" && (on_arg + 1 < argc))
                {
                    ++on_arg;
                    assert(on_arg < argc);
                    in_file = argv[on_arg];
                }
                else if (arg == "-o" && (on_arg + 1 < argc))
                {
                    ++on_arg;
                    assert(on_arg < argc);
                    out_file = argv[on_arg];
                    out_frmt = "ESRI Shapefile"; // default output format
                }
                else if (arg == "-f" && (on_arg + 1 < argc))
                {
                    ++on_arg;
                    assert(on_arg < argc);
                    out_frmt = argv[on_arg];
                }
                else
                {
                    throw std::runtime_error(std::string("unrecognized parameter: ") + arg);
                }
                ++on_arg;
            }

            if (in_file.empty() || out_file.empty() || out_frmt.empty())
            {
                // Typo fix: was "paremeters".
                throw std::runtime_error("missing input parameters");
            }
        }

        //
        // Source
        //
        std::cout << "Source:" << "\n - dataset: " << in_file << std::endl;

        std::ifstream ifs;
        if (!liblas::Open(ifs, in_file.c_str()))
        {
            throw std::runtime_error(std::string("Can not open \'") + in_file + "\'");
        }
        liblas::Reader reader(ifs);

        //
        // Target
        //
        // Layer name is the output file name without its extension.
        std::string const lyrname(out_file.substr(0, out_file.find_last_of('.')));
        std::cout << "Target:" << "\n - format: " << out_frmt
                  << "\n - dataset: " << out_file
                  << "\n - layer: " << lyrname << std::endl;

        OGRSFDriverH drv = OGRGetDriverByName(out_frmt.c_str());
        if (0 == drv)
        {
            throw std::runtime_error(out_frmt + " driver not available");
        }

        ogr_wrapper<OGRDataSourceH> ds(OGR_Dr_CreateDataSource(drv, out_file.c_str(), 0), OGR_DS_Destroy);
        if (0 == ds.get())
        {
            throw std::runtime_error(out_file + " datasource creation failed");
        }

        OGRLayerH lyr = OGR_DS_CreateLayer(ds, lyrname.c_str(), 0, wkbPoint25D, 0);
        if (0 == lyr)
        {
            throw std::runtime_error(out_file + " layer creation failed");
        }

        // Prepare layer schema
        create_layer_def(lyr);

        //
        // Translation of points cloud to features set
        //
        boost::uint32_t i = 0;
        boost::uint32_t const size = reader.GetHeader().GetPointRecordsCount();

        std::cout << "Translating " << size << " points:\n";

        // One feature handle is created up front and its fields/geometry are
        // overwritten per point.
        ogr_wrapper<OGRFeatureH> feat(OGR_F_Create(OGR_L_GetLayerDefn(lyr)), OGR_F_Destroy);

        while (reader.ReadNextPoint())
        {
            liblas::Point const& p = reader.GetPoint();

            OGR_F_SetFieldInteger(feat, 0, p.GetReturnNumber());
            OGR_F_SetFieldInteger(feat, 1, p.GetScanAngleRank());
            OGR_F_SetFieldInteger(feat, 2, p.GetIntensity());

            std::ostringstream os;
            os << p.GetClassification();
            OGR_F_SetFieldString(feat, 3, os.str().c_str());

            OGR_F_SetFieldInteger(feat, 4, p.GetNumberOfReturns());
            OGR_F_SetFieldDouble(feat, 5, p.GetTime());

            ogr_wrapper<OGRGeometryH> geom(OGR_G_CreateGeometry(wkbPoint25D), OGR_G_DestroyGeometry);
            OGR_G_SetPoint(geom, 0, p.GetX(), p.GetY(), p.GetZ());
            if (OGRERR_NONE != OGR_F_SetGeometry(feat, geom))
            {
                throw std::runtime_error("geometry creation failed");
            }

            if (OGRERR_NONE != OGR_L_CreateFeature(lyr, feat))
            {
                throw std::runtime_error("feature creation failed");
            }

            term_progress(std::cout, (i + 1) / static_cast<double>(size));
            i++;
        }
        std::cout << std::endl;
    }
    catch (std::exception const& e)
    {
        std::cerr << "Error: " << e.what() << std::endl;
        rc = -1;
    }
    catch (...)
    {
        std::cerr << "Unknown error\n";
        rc = -1;
    }

    // BUG FIX: OGRCleanupAll() used to sit outside the #ifdef guard, which
    // breaks builds without GDAL (the symbol does not exist there).
    ::OGRCleanupAll();
#else
    std::cout << "Missing GDAL/OGR support built-in las2ogr. Aborted." << std::endl;
#endif // #ifdef HAVE_GDAL

    return rc;
}
std::shared_ptr<TileData> ClientGeoJsonSource::parse(const Tile& _tile, std::vector<char>& _rawData) const { if (!m_store) { return nullptr; } auto data = std::make_shared<TileData>(); auto id = _tile.getID(); auto tile = m_store->getTile(id.z, id.x, id.y); // uses a mutex lock internally for thread-safety Layer layer(""); // empty name will skip filtering by 'collection' for (auto& it : tile.features) { Feature feat(m_id); const auto& geom = it.tileGeometry; const auto type = it.type; switch (type) { case geojsonvt::TileFeatureType::Point: { feat.geometryType = GeometryType::points; for (const auto& pt : geom) { const auto& point = pt.get<geojsonvt::TilePoint>(); feat.points.push_back(transformPoint(point)); } break; } case geojsonvt::TileFeatureType::LineString: { feat.geometryType = GeometryType::lines; for (const auto& r : geom) { Line line; for (const auto& pt : r.get<geojsonvt::TileRing>().points) { line.push_back(transformPoint(pt)); } feat.lines.emplace_back(std::move(line)); } break; } case geojsonvt::TileFeatureType::Polygon: { feat.geometryType = GeometryType::polygons; for (const auto& r : geom) { Line line; for (const auto& pt : r.get<geojsonvt::TileRing>().points) { line.push_back(transformPoint(pt)); } // Polygons are in a flat list of rings, with ccw rings indicating // the beginning of a new polygon if (signedArea(line) >= 0 || feat.polygons.empty()) { feat.polygons.emplace_back(); } feat.polygons.back().push_back(std::move(line)); } break; } default: break; } feat.props = *it.tags.map; layer.features.emplace_back(std::move(feat)); } data->layers.emplace_back(std::move(layer)); return data; }
// Trains a Gaussian-kernel LibSVM binary classifier (Shogun) on the SIFT
// image features in imgvec, then evaluates it on the test set loaded via
// getTestImg(), printing overall and per-class accuracy.
void fImgSvm::test_libsvm2()
{
    init_shogun(&print_message);

    const int32_t feature_cache=0;
    const int32_t kernel_cache=0;
    const float64_t rbf_width=10;
    const float64_t svm_C=10;
    const float64_t svm_eps=0.001;

    int32_t num=mtrainimgsum;  // number of training images
    int32_t dims=SIFTN;        // feature dimensionality
    float64_t dist=0.5;

    SGVector<float64_t> lab(num); // labels
    SGMatrix<float64_t> feat(dims, num);
    //gen_rand_data(lab, feat, dist);

    // Fill the feature matrix column-wise: column i = feature vector of image i.
    for(int i = 0 ; i < num ; i ++ )
    {
        for(int j = 0 ; j < dims ; j ++ )
        {
            feat(j,i) = imgvec[i][j];
        }
    }
    // Map class 1 -> -1.0 and everything else -> +1.0 for binary labels.
    for(int i = 0 ; i < num ; i ++ )
    {
        //lab[i] = imglabelvec[i]*1.0;
        if(imgtrainlabelvec[i] == 1)
            lab[i] = -1.0;
        else
            lab[i] = 1.0;
    }

    // create train labels
    CLabels* labels=new CBinaryLabels(lab);

    // create train features
    CDenseFeatures<float64_t>* features=new CDenseFeatures<float64_t>(feature_cache);
    SG_REF(features);
    features->set_feature_matrix(feat);

    // create gaussian kernel
    CGaussianKernel* kernel=new CGaussianKernel(kernel_cache, rbf_width);
    SG_REF(kernel);
    kernel->init(features, features);

    // create svm via libsvm and train
    CLibSVM* svm=new CLibSVM(svm_C, kernel, labels);
    SG_REF(svm);
    svm->set_epsilon(svm_eps);
    svm->train();

    SG_SPRINT("num_sv:%d b:%f\n", svm->get_num_support_vectors(), svm->get_bias());

    // classify + display output on the training data itself
    CBinaryLabels* out_labels=CBinaryLabels::obtain_from_generic(svm->apply());

    for (int32_t i=0; i<num; i++)
    {
        SG_SPRINT("out[%d]=%f (%f)\n", i, out_labels->get_label(i), out_labels->get_confidence(i));
    }

    CBinaryLabels* result = CBinaryLabels::obtain_from_generic (svm->apply(features) );
    for (int32_t i=0; i<3; i++)
        SG_SPRINT("output[%d]=%f\n", i, result->get_label(i));

    // update
    // predict the
    printf("----------------test -----------------\n");

    // Load and classify the held-out test images.
    getTestImg(imgtestvec);
    int32_t testnum = mtestingsum;
    SGMatrix<float64_t> testfeat(dims, testnum);
    for(int i = 0 ; i < testnum ; i ++ )
    {
        for(int j = 0 ; j < dims ; j ++ )
        {
            testfeat(j,i) = imgtestvec[i][j];
        }
    }

    CDenseFeatures<float64_t>* testfeatures=new CDenseFeatures<float64_t>(feature_cache);
    SG_REF(testfeatures);
    testfeatures->set_feature_matrix(testfeat);

    CBinaryLabels* testresult = CBinaryLabels::obtain_from_generic (svm->apply(testfeatures) );

    // Count correct predictions per class: class 1 expects a negative label,
    // class 2 expects a positive one (matching the training mapping above).
    int32_t rightnum1 = 0;
    int32_t rightsum1 = 0;
    int32_t rightnum2 = 0;
    for (int32_t i=0; i<testnum; i++)
    {
        SG_SPRINT("output[%d]=%f\n", i, testresult->get_label(i));
        if(imgtestlabelvec[i] == 1 )
        {
            if( (testresult->get_label(i)) < 0.0)
            {
                rightnum1 ++;
            }
            rightsum1 ++ ;
        }
        else if(imgtestlabelvec[i] == 2 && testresult->get_label(i) > 0.0)
        {
            rightnum2 ++ ;
        }
    }

    printf(" %lf\n ",(rightnum1+rightnum2)*1.0 / testnum);
    printf("class 1 : %lf\n",rightnum1 *1.0 / rightsum1);
    printf("class 2 : %lf\n",rightnum2 *1.0 / (testnum - rightsum1));

    // NOTE(review): testfeatures, result and testresult are never SG_UNREF'd
    // here — possible leaks; confirm against Shogun ref-counting rules.
    SG_UNREF(out_labels);
    SG_UNREF(kernel);
    SG_UNREF(features);
    SG_UNREF(svm);

    exit_shogun();
}
// EKF smoothing test on recorded PTZ (pan/tilt/zoom) camera data: replays a
// PTZ sequence, maintains a set of visible court landmarks (adding/removing
// them as the ground-truth camera moves), perturbs their projections with
// Gaussian noise, estimates a camera per frame, feeds everything through
// PTZKeypointDynamicEKF, and writes ground-truth / observed / smoothed
// pan-tilt-zoom series to a MATLAB .mat file.
void test_EKF_remove_feature_landmark_noise_ptz_data()
{
    // Hard-coded test input (developer machine path).
    vcl_string ptz_file("/Users/jimmy/Desktop/images/33_slam_data/ptz_145420_145719.txt");
    vcl_vector<PTZData> ptzs;
    bool isRead = readPTZCameraFile(ptz_file.c_str(), ptzs, 1);
    assert(isRead);

    vcl_vector<double> gdPans;       // ground-truth series
    vcl_vector<double> gdTilts;
    vcl_vector<double> gdZooms;
    vcl_vector<double> observedPans; // per-frame estimates from noisy points
    vcl_vector<double> observedTilts;
    vcl_vector<double> observedZooms;
    vcl_vector<vcl_vector<vgl_point_2d<double> > > observedImagePts;

    const int width = 1280;
    const int height = 720;
    vnl_random rnd;
    double delta = 2.0; // std-dev (pixels) of the noise added to projections
    vgl_point_2d<double> pp(width/2, height/2); // principal point

    PTZKeypointDynamicEKF ptzDynamicEKF;
    // const int M = (int)firstFeatures.size();

    // Initial camera state: [pan, tilt, focal-length, velocities(3)] with
    // prior covariance CP0 and process noise CQ0.
    vnl_vector<double> C0(6, 0);
    C0[0] = ptzs[0].pan;
    C0[1] = ptzs[0].tilt;
    C0[2] = ptzs[0].fl;
    vnl_matrix<double> CP0(6, 6, 0);
    CP0(0, 0) = 0.01;
    CP0(1, 1) = 0.01;
    CP0(2, 2) = 10;
    CP0(3, 3) = 0.001;
    CP0(4, 4) = 0.001;
    CP0(5, 5) = 0.1;
    vnl_matrix<double> CQ0(6, 6, 0);
    CQ0(0, 0) = 0.00000004;
    CQ0(1, 1) = 0.00000004;
    CQ0(2, 2) = 0.0000004;
    CQ0(3, 3) = 0.00000001;
    CQ0(4, 4) = 0.00000001;
    CQ0(5, 5) = 0.0000001;

    // construct feature a database: all court calibration points (z = 0)
    // with a placeholder image position.
    vcl_vector<vgl_point_2d<double> > courtPoints = DisneyWorldBasketballCourt::getCalibPoints();
    vcl_list<VxlEKFFeaturePoint> featureDatabase;
    for (int i = 0; i<courtPoints.size(); i++) {
        vgl_point_3d<double> p(courtPoints[i].x(), courtPoints[i].y(), 0);
        vgl_point_2d<double> q(0, 0);
        VxlEKFFeaturePoint feat(p, q);
        feat.id_ = i;
        featureDatabase.push_back(feat);
    }

    // init camera and feature from ground truth of data set: keep only the
    // landmarks visible in the first frame.
    VxlEKFCamera camera(C0, CP0, CQ0);
    vcl_list<VxlEKFFeaturePoint> features;
    for (int i = 0; i<courtPoints.size(); i++) {
        vgl_homg_point_3d<double> p(courtPoints[i].x(), courtPoints[i].y(), 0, 1.0);
        if (camera.is_behind_camera(p)) {
            continue;
        }
        vgl_point_2d<double> q= camera.project(p);
        if (vgl_inside_image(q, width, height, 10)) {
            VxlEKFFeaturePoint feat(p, q);
            feat.id_ = i;
            features.push_back(feat);
        }
    }
    printf("initiate feature number is %lu\n", features.size());

    vnl_vector<double> Xk;
    vnl_matrix<double> Pk;
    bool isInit = ptzDynamicEKF.updateCameraFeature(camera, features, Xk, Pk);
    assert(isInit);

    vcl_vector<double> smoothedPans;  // EKF output series
    vcl_vector<double> smoothedTilts;
    vcl_vector<double> smoothedZooms;
    for (int i = 1; i<ptzs.size(); i++) { // ptzs.size()
        gdPans.push_back(ptzs[i].pan);
        gdTilts.push_back(ptzs[i].tilt);
        gdZooms.push_back(ptzs[i].fl);

        // Ground-truth camera for this frame, used to decide visibility and
        // to generate (noisy) observations.
        vpgl_perspective_camera<double> gdCamera;
        bool isCamera = VxlPTZCamera::PTZToCamera(ptzs[i].fl, ptzs[i].pan, ptzs[i].tilt, gdCamera);
        assert(isCamera);

        // remove features that left the view or went behind the camera
        vcl_list<VxlEKFFeaturePoint>::iterator it = features.begin();
        while (it != features.end()) {
            vgl_point_3d<double> p = it->worldPt();
            if (gdCamera.is_behind_camera(vgl_homg_point_3d<double>(p.x(), p.y(), p.z(), 1.0))) {
                it = features.erase(it);
                printf("erase a feature\n");
                continue;
            }
            vgl_point_2d<double> q = gdCamera.project(p);
            if (!vgl_inside_image(q, width, height, 10)) {
                it = features.erase(it);
                printf("erase a feature\n");
                continue;
            }
            it++;
        }
        printf("feature number is %lu\n", features.size());

        // add database features that became visible and are not tracked yet
        for (vcl_list<VxlEKFFeaturePoint>::iterator it = featureDatabase.begin(); it != featureDatabase.end(); it++) {
            vgl_point_3d<double> p = it->worldPt();
            if (gdCamera.is_behind_camera(vgl_homg_point_3d<double>(p.x(), p.y(), p.z(), 1.0))) {
                continue;
            }
            vgl_point_2d<double> q = gdCamera.project(p);
            if (vgl_inside_image(q, width, height, 10)) {
                vcl_list<VxlEKFFeaturePoint>::iterator findIt = std::find(features.begin(), features.end(), *it);
                // cannot find same featuere (defined by id) in the features
                if (findIt == features.end()) {
                    VxlEKFFeaturePoint feat(p, q);
                    feat.id_ = it->id_;
                    features.push_back(feat);
                    printf("add a new feature with id %d\n", (int)feat.id_);
                }
            }
        }

        vcl_vector<vgl_point_2d<double> > worldPts;
        vcl_vector<vgl_point_2d<double> > imagePts;
        // add noise to current observation
        for (vcl_list<VxlEKFFeaturePoint>::iterator it = features.begin(); it != features.end(); it++) {
            vgl_point_3d<double> p = it->worldPt();
            vgl_point_2d<double> q = gdCamera.project(p);
            double x = q.x();
            double y = q.y();
            x += delta * rnd.normal();
            y += delta * rnd.normal();
            it->setImagePoint(x, y);
            worldPts.push_back(vgl_point_2d<double>(p.x(), p.y()));
            imagePts.push_back(it->imagePt());
        }

        // Estimate an "observed" camera from the noisy correspondences:
        // linear init followed by non-linear refinement.
        vpgl_perspective_camera<double> initCamera;
        vpgl_perspective_camera<double> finalCamera;
        bool isInit = VpglPlus::init_calib(worldPts, imagePts, pp, initCamera);
        if (!isInit) {
            printf("initiate camera error\n");
            continue;
        }
        bool isFinal = VpglPlus::optimize_perspective_camera(worldPts, imagePts, initCamera, finalCamera);
        if (!isFinal) {
            printf("final camera error\n");
            continue;
        }

        //
        double pan = 0;
        double tilt = 0;
        double zoom = 0;
        bool isPTZ = VxlPTZCamera::CameraToPTZ(finalCamera, pan, tilt, zoom);
        assert(isPTZ);

        observedPans.push_back(pan);
        observedTilts.push_back(tilt);
        observedZooms.push_back(zoom);
        printf("observed pan tilt focal length is %f %f %f\n", pan, tilt, zoom);

        // EKF update with the current feature set; Xk holds the smoothed
        // camera state [pan, tilt, fl, ...].
        ptzDynamicEKF.updateCameraFeature(camera, features, Xk, Pk);
        printf("gd pan tilt focal length is %f %f %f\n\n", ptzs[i].pan, ptzs[i].tilt, ptzs[i].fl);

        smoothedPans.push_back(Xk[0]);
        smoothedTilts.push_back(Xk[1]);
        smoothedZooms.push_back(Xk[2]);
    }

    //save all series to a .mat file for plotting
    vnl_vector<double> gdPanVec(&gdPans[0], (int)gdPans.size());
    vnl_vector<double> observedPanVec(&observedPans[0], (int)observedPans.size());
    vnl_vector<double> smoothedPanVec(&smoothedPans[0], (int)smoothedPans.size());

    vnl_vector<double> gdZoomVec(&gdZooms[0], (int)gdZooms.size());
    vnl_vector<double> observedZoomVec(&observedZooms[0], (int)observedZooms.size());
    vnl_vector<double> smoothedZoomVec(&smoothedZooms[0], (int)smoothedZooms.size());

    vnl_vector<double> gdTiltVec(&gdTilts[0], (int)gdTilts.size());
    vnl_vector<double> observedTiltVec(&observedTilts[0], (int)observedTilts.size());
    // NOTE(review): smoothedTiltVec is sized by observedTilts.size(), not
    // smoothedTilts.size() — if the vectors ever differ in length this reads
    // out of bounds; confirm intended.
    vnl_vector<double> smoothedTiltVec(&smoothedTilts[0], (int)observedTilts.size());

    vcl_string save_file("EKF_dynamic_ptz.mat");
    vnl_matlab_filewrite awriter(save_file.c_str());

    awriter.write(gdPanVec, "gdPan");
    awriter.write(observedPanVec, "observedPan");
    awriter.write(smoothedPanVec, "sm_pan");

    awriter.write(gdZoomVec, "gdZoom");
    awriter.write(observedZoomVec, "observedZoom");
    awriter.write(smoothedZoomVec, "smoothedZoom");

    awriter.write(gdTiltVec, "gdTilt");
    awriter.write(vnl_vector<double>(&observedTilts[0], (int)observedTilts.size()), "observedTilt");
    awriter.write(vnl_vector<double>(&smoothedTilts[0], (int)observedTilts.size()), "smoothedTilt");
    printf("save to %s\n", save_file.c_str());
}
// Filters a Hall-effect signal into s: the signal is mirrored into negative
// magnetic fields (odd symmetry about B == 0), low-pass filtered with
// TrForMassiveFilter, and the filter's warm-up samples are discarded.
//
// NOTE(review): this function does not compile as written — 'virtual' is not
// allowed on an out-of-class member definition, 'in' is a pointer but is used
// as 'in.size()', and 'inB' / 'fP' are never declared (presumably meant to be
// 'in' / 'filterParams'). Flagged rather than fixed because the intended
// behaviour cannot be confirmed from this file alone.
// (Original comments were in Russian; translated to English below.)
virtual void THallEffect::filterTo(TSignal & s)
{
    TSignal * in;
    unsigned int NumberOfPoints;
    // If this is a combined signal.
    if( s[0]<-1.0 && s.back() >1.0)
    {
        feat();
        // It must be averaged.
        // We will filter the averaged signal.
    }
    // If this is an ordinary signal — filter it as is.
    in=signal;
    NumberOfPoints=in.size(); // NOTE(review): '.' on a pointer — presumably in->size().
    // Data for negative magnetic fields will be reconstructed into these arrays.
    // (Original note: "Nice, but what if this is a signal for a negative
    // magnetic field? Then in theory the positive part is reconstructed.
    // These comments need fixing to match reality.")
    TSignal tempIn(2*NumberOfPoints);
    TSignal tempOut(2*NumberOfPoints);
    // Build the filter input signal,
    // extending it into negative magnetic fields.
    for (unsigned int i = 0; i < NumberOfPoints; i++)
    {
        /* Translated original note: "Let's look at this carefully. The Hall
           effect has odd symmetry about the point B == 0; why did I assume
           this is the zeroth element? For a signal with a positive magnetic
           field this holds. For a combined (already merged) signal it holds
           because the averaged signal is filtered (it effectively contains
           only positive-field values). For a negative magnetic field this
           equality, as far as I can tell, does not hold." */
        tempIn[i]=-(*inB)[NumberOfPoints-i-1]; // NOTE(review): 'inB' undeclared — presumably 'in'.
        tempIn[i+NumberOfPoints]=(*in)[i];
    }
    /* For a negative magnetic field the element order must be inverted,
       because the positive-field values end up at the front. */
    if(tempIn[0]>1.0)
    {
        std::reverse(tempIn.begin(),tempIn.end());
    }
    // Apply the filter.
    TrForMassiveFilter(tempIn,tempOut,filterParams.filterLength,filterParams.SamplingFrequecy, filterParams.BandwidthFrequency,filterParams.AttenuationFrequency);
    // The size changes inside the filter, depending on the filter length.
    NumberOfPoints=tempOut.size();
    s.clear();
    // Drop the first filterLength warm-up samples.
    for(unsigned int i=fP.filterLength;i<NumberOfPoints;i++) // NOTE(review): 'fP' undeclared — presumably 'filterParams'.
    {
        s.push_back(tempOut[i]);
    }
}
int main(int argc, char **argv) { if (argc < 5) { cout<<"Usage: "<<argv[0]<<" projMatList rMeanList codeBookList outputBase [windowSize] [stepSize] [intSize]"<<endl; return 0; } string projMatList(argv[1]); string rMeanList(argv[2]); string codeBookList(argv[3]); string outputBase(argv[4]); int windowSize = 100; int stepSize = 100; int intSize = 100; if (argc > 5) windowSize = atoi(argv[5]); if (argc > 6) stepSize = atoi(argv[6]); if (argc > 7) intSize = atoi(argv[7]); if (windowSize % intSize != 0 || stepSize % intSize != 0) { cout<<"Integral size must be the gcd of window size and step size."<<endl; return 0; } string types[5] = {"traj", "hog", "hof", "mbhx", "mbhy"}; vector<TeVector*> tevs(5, NULL); ifstream fin1, fin2, fin3; fin1.open(projMatList.c_str()); if (!fin1.is_open()) { cout<<"Cannot open "<<projMatList<<endl; return 0; } fin2.open(codeBookList.c_str()); if (!fin2.is_open()) { cout<<"Cannot open "<<codeBookList<<endl; return 0; } fin3.open(rMeanList.c_str()); if (!fin3.is_open()) { cout<<"Cannot open "<<rMeanList<<endl; return 0; } string projMatFile, codeBookFile, rMeanFile; //for (int i = 0; i < fvs.size(); i++) { for (int i = 1; i < tevs.size(); i++) { getline(fin1, projMatFile); getline(fin2, codeBookFile); getline(fin3, rMeanFile); tevs[i] = new TeVector(codeBookFile, rMeanFile, projMatFile); tevs[i]->initTeV(intSize); // 1 layer of spatial pyramids } fin1.close(); fin2.close(); fin3.close(); string line; cout<<"Start loading points..."<<endl; while (getline(cin, line)) { DTFeature feat(line); //TODO: Store feature of DT with vector<double> //vector<double> traj(feat.traj, feat.traj+TRAJ_DIM); vector<double> hog(feat.hog, feat.hog+HOG_DIM); vector<double> hof(feat.hof, feat.hof+HOF_DIM); vector<double> mbhx(feat.mbhx, feat.mbhx+MBHX_DIM); vector<double> mbhy(feat.mbhy, feat.mbhy+MBHY_DIM); //tevs[0]->addPoint(traj, feat.frameNum); tevs[1]->addPoint(hog, feat.frameNum); tevs[2]->addPoint(hof, feat.frameNum); tevs[3]->addPoint(mbhx, feat.frameNum); 
tevs[4]->addPoint(mbhy, feat.frameNum); } cout<<"Points load complete."<<endl; //for (int i = 0; i < tevs.size(); i++) { for (int i = 1; i < tevs.size(); i++) { ofstream fout; string outName = outputBase + "." + types[i] + ".tev.txt"; fout.open(outName.c_str()); vector<double> tev = tevs[i]->getTeV(); fout<<tev[0]; for (int j = 1; j < tev.size(); j++) fout<<" "<<tev[j]; fout<<endl; fout.close(); outName = outputBase + "." + types[i] + ".tev.seq"; fout.open(outName.c_str()); vector<vector<double> > localTeVs = tevs[i]->getTeV(stepSize, windowSize); for (int j = 0; j < localTeVs.size(); j++) { fout<<localTeVs[j][0]; for (int k = 1; k < localTeVs[j].size(); k++) fout<<" "<<localTeVs[j][k]; fout<<endl; } fout.close(); tevs[i]->clearTeV(); } for (int i = 1; i < tevs.size(); i++) //for (int i = 0; i < fvs.size(); i++) delete tevs[i]; return 0; }