virtual BOOL on_event (HELEMENT he, HELEMENT target, BEHAVIOR_EVENTS type, UINT_PTR reason ) { switch( type ) { case POPUP_READY: { element ePopup(target); if (!ePopup.is_valid() || ePopup.children_count()) return true; if (sg_vFontNames.empty()) { LOGFONT lf; lf.lfFaceName[0] = L'\0'; lf.lfCharSet = DEFAULT_CHARSET/*ANSI_CHARSET*/; HDC hDC = ::GetDC(NULL); EnumFontFamiliesEx(hDC, &lf, (FONTENUMPROC)xEnumFontsProc, NULL, 0); ::ReleaseDC(NULL,hDC); sg_vFontNames.shrink_to_fit(); } ePopup.set_style_attribute("max-height",L"200"); auto itrEnd = sg_vFontNames.end(); element eItem; for (auto itr = sg_vFontNames.begin(); itr != itrEnd; ++itr) { eItem = element::create("option"); ePopup.append(eItem); eItem.set_text((*itr).c_str()); eItem.set_style_attribute("font-family",(*itr).c_str()); } return true; } } return false; }
//----------------------------------------------------------------------------- std::pair<std::int32_t, std::int32_t> GraphBuilder::compute_dual_graph(const MPI_Comm mpi_comm, const boost::multi_array<std::int64_t, 2>& cell_vertices, const CellType& cell_type, const std::int64_t num_global_vertices, std::vector<std::vector<std::size_t>>& local_graph, std::set<std::int64_t>& ghost_vertices) { log(PROGRESS, "Build mesh dual graph"); // Compute local part of dual graph FacetCellMap facet_cell_map; std::int32_t num_local_edges = compute_local_dual_graph(mpi_comm, cell_vertices, cell_type, local_graph, facet_cell_map); // Compute nonlocal part std::int32_t num_nonlocal_edges = compute_nonlocal_dual_graph(mpi_comm, cell_vertices, cell_type, num_global_vertices, local_graph, facet_cell_map, ghost_vertices); // Shrink to fit local_graph.shrink_to_fit(); return {num_local_edges, num_nonlocal_edges}; }
//############################################################################## //############################################################################## static std::vector<boost::asio::ip::address> get_peer_addresses(std::vector<std::string> peers) { range::Emitter log { get_peer_addressesLogModule }; RANGE_LOG_FUNCTION(); std::sort(peers.begin(), peers.end()); peers.erase(std::unique(peers.begin(), peers.end()), peers.end()); peers.shrink_to_fit(); std::vector<boost::asio::ip::address> addrs; boost::asio::io_service io_service; for (std::string peer : peers) { boost::asio::ip::udp::resolver resolver(io_service); boost::asio::ip::udp::resolver::query query(peer, ""); try { for(auto it = resolver.resolve(query); it != decltype(it)(); ++it) { addrs.push_back(it->endpoint().address()); } } catch (boost::system::system_error &e) { LOG(warning, "unable_to_resolve") << peer; } } std::sort(addrs.begin(), addrs.end()); addrs.erase(std::unique(addrs.begin(), addrs.end()), addrs.end()); addrs.shrink_to_fit(); return addrs; }
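// Editor's sketch (not taken from the sources above; names are illustrative): the minimal form of
// the sort / unique / erase / shrink_to_fit idiom used in get_peer_addresses. std::unique only
// moves the unique elements to the front, so the leftover tail still has to be erased explicitly.
#include <algorithm>
#include <string>
#include <vector>

static void dedupe_and_shrink(std::vector<std::string> &values)
{
    std::sort(values.begin(), values.end());                               // unique needs sorted input
    values.erase(std::unique(values.begin(), values.end()), values.end()); // drop the duplicate tail
    values.shrink_to_fit();                                                // non-binding request to free spare capacity
}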
void Peer::considerOffers(std::vector<std::pair<Peer*, std::vector<size_t>>>& offers) { // Sanity check: We should only be getting offers for things we don't have #ifndef NDEBUG for (auto& offerSet : offers) { for (size_t offer : offerSet.second) assert(!chunkList[offer]); } #endif assert(consideredOffers.empty()); auto popularity = getChunkPopularity(); // Coalesce our offers into one big list for (auto& offerSet : offers) { for (size_t offer : offerSet.second) consideredOffers.emplace_back(offerSet.first, offer); } // We can free up the offers vector. We're all done with it. // TODO: Should we do this? Could be shooting ourselves in the foot // if we forget later. Oh well. offers.clear(); offers.shrink_to_fit(); // Let's sort all of our offers by how popular they are sort(begin(consideredOffers), end(consideredOffers), [&] (const Offer& a, const Offer& b) { return popularity[a.chunkIdx].second < popularity[b.chunkIdx].second; }); }
void end_train(void) { ds = new AnnoyIndex<int, int64_t, Hamming, Kiss32Random>((pointset[0]).size()); for (int i = 0; i < pointset.size(); i++) { ds->add_item(i, mapEntry(pointset[i])); } pointset.clear(); pointset.shrink_to_fit(); ds->build(num_trees); }
/** * * rct2: 0x006D41DC */ static void window_install_track_close(rct_window* w) { _trackPath.clear(); _trackName.clear(); _trackDesignPreviewPixels.clear(); _trackDesignPreviewPixels.shrink_to_fit(); track_design_dispose(_trackDesign); _trackDesign = nullptr; }
/* * FixColFile * Fixes the COLFILE from gta.dat not working properly, crashing the game. */ void FixColFile() { static bool using_colbuf; // Is using colbuf or original buffer? static bool empty_colmodel; // Is this a empty colmodel? static std::vector<char> colbuf; // Buffer for reading col file into // Prototype for patches using rcolinfo_f = int(void*, uint32_t*, size_t); using rcolmodel_f = int(void*, char*, size_t); using lcol_f = int(char*, int, void*, char*); using rel_f = int(void*); // Fixes the crash caused by using COLFILE for a building etc ColModelPool_new = MakeCALL(0x5B4F2E, HOOK_LoadColFileFix).get(); // Reads collision info and check if we need to use our own collision buffer auto ReadColInfo = [](std::function<rcolinfo_f> Read, void*& f, uint32_t*& buffer, size_t& size) { auto r = Read(f, buffer, size); empty_colmodel = (buffer[1] <= 0x18); if(using_colbuf = !empty_colmodel) colbuf.resize(buffer[1]); return r; }; // Replace buffer if necessary auto ReadColModel = [](std::function<rcolmodel_f> Read, void*& f, char*& buffer, size_t& size) { if(!empty_colmodel) return Read(f, using_colbuf? colbuf.data() : buffer, size); return 0; }; // Replace buffer if necessary auto LoadCollisionModel = [](std::function<lcol_f> LoadColModel, char*& buf, int& size, void*& colmodel, char*& modelname) { return LoadColModel(using_colbuf? colbuf.data() : buf, size, colmodel, modelname); }; // Frees our buffer auto ReleaseBuffer = [](std::function<rel_f> fclose, void*& f) { colbuf.clear(); colbuf.shrink_to_fit(); return fclose(f); }; // Patches make_static_hook<function_hooker<0x5B4EF4, rcolmodel_f>>(ReadColModel); make_static_hook<function_hooker<0x5B4E92, rcolinfo_f>>(ReadColInfo); make_static_hook<function_hooker<0x5B4FCC, rcolinfo_f>>(ReadColInfo); make_static_hook<function_hooker<0x5B4FA0, lcol_f>>(LoadCollisionModel); make_static_hook<function_hooker<0x5B4FB5, lcol_f>>(LoadCollisionModel); make_static_hook<function_hooker<0x5B4F83, lcol_f>>(LoadCollisionModel); make_static_hook<function_hooker<0x5B92F9, rel_f>>(ReleaseBuffer); }
/** * Reads the beginning of an .osrm file and produces: * - list of barrier nodes * - list of traffic lights * - nodes indexed by their internal (non-osm) id */ NodeID loadNodesFromFile(std::istream &input_stream, std::vector<NodeID> &barrier_node_list, std::vector<NodeID> &traffic_light_node_list, std::vector<extractor::QueryNode> &node_array) { const FingerPrint fingerprint_valid = FingerPrint::GetValid(); FingerPrint fingerprint_loaded; input_stream.read(reinterpret_cast<char *>(&fingerprint_loaded), sizeof(FingerPrint)); if (!fingerprint_loaded.TestContractor(fingerprint_valid)) { SimpleLogger().Write(logWARNING) << ".osrm was prepared with different build.\n" "Reprocess to get rid of this warning."; } NodeID n; input_stream.read(reinterpret_cast<char *>(&n), sizeof(NodeID)); SimpleLogger().Write() << "Importing n = " << n << " nodes "; extractor::ExternalMemoryNode current_node; for (NodeID i = 0; i < n; ++i) { input_stream.read(reinterpret_cast<char *>(&current_node), sizeof(extractor::ExternalMemoryNode)); node_array.emplace_back(current_node.lon, current_node.lat, current_node.node_id); if (current_node.barrier) { barrier_node_list.emplace_back(i); } if (current_node.traffic_lights) { traffic_light_node_list.emplace_back(i); } } // tighten vector sizes barrier_node_list.shrink_to_fit(); traffic_light_node_list.shrink_to_fit(); return n; }
void createSearchKey(unsigned int numberRows, unsigned int NBFS, std::vector<int> &search_key, const Eigen::SparseMatrix<int> &EdgeMatrix) { //columndegree contains number of nonzeros per column //for removing searchkey values that are not connected to main graph std::vector<int> columndegree; columndegree.reserve(numberRows); for (unsigned int i = 0; i < numberRows; i++) { columndegree.push_back(EdgeMatrix.outerIndexPtr()[i+1]-EdgeMatrix.outerIndexPtr()[i]); } //generate search key values based on time seed std::mt19937 generator(std::chrono::system_clock::now().time_since_epoch()/std::chrono::seconds(1)); std::cout << "creating search key vector" << std::endl; for (unsigned int i = 0; i < numberRows; i++) { search_key.push_back(i); } //shuffle search key values std::shuffle(search_key.begin(),search_key.end(),generator); //take first 64 or entire search key, whichever is smaller if (search_key.size() > NBFS) { for (unsigned int i = 0; i < NBFS+20 && i < search_key.size(); i++) { //remove search key values that aren't connected to main graph if (columndegree.at(search_key.at(i)) == 0) { search_key.erase(search_key.begin()+i); i--; } } if (search_key.size() > NBFS) { search_key.erase(search_key.begin()+NBFS, search_key.end()); } } std::cout << "Removing search keys with no edges" << std::endl; for (unsigned int i = 0; i < search_key.size(); i++) { //remove search key values that aren't connected to main graph if (columndegree.at(search_key.at(i)) == 0) { search_key.erase(search_key.begin()+i); i--; } } search_key.shrink_to_fit(); }
inline static void alacv_get_nodes(std::vector<int> &node_set, bool ready) { node_set.clear(); for(std::size_t i = 0, max_i = _alacrity_vector.size(); i < max_i; ++i) { flag_t flag_block = _alacrity_vector[i]; for (std::size_t j = 0, max_j = min(_max_flag_count, FLAG_TYPE_BIT_COUNT); j < max_j; ++j) { if (((flag_block & __ROTATE_LEFT(1, j)) != 0) == ready) /* does bit j match the requested state? comparing the raw mask against 0/1 only worked for bit 0 */ node_set.push_back(i * FLAG_TYPE_BIT_COUNT + j); } } node_set.shrink_to_fit(); }
/** * * rct2: 0x006D0119 */ static void window_track_place_close(rct_window *w) { window_track_place_clear_provisional(); viewport_set_visibility(0); map_invalidate_map_selection_tiles(); gMapSelectFlags &= ~MAP_SELECT_FLAG_ENABLE_CONSTRUCT; gMapSelectFlags &= ~MAP_SELECT_FLAG_ENABLE_ARROW; hide_gridlines(); _window_track_place_mini_preview.clear(); _window_track_place_mini_preview.shrink_to_fit(); track_design_dispose(_trackDesign); _trackDesign = nullptr; }
void SetActiveCodes(const std::vector<GeckoCode>& gcodes) { std::lock_guard<std::mutex> lk(s_active_codes_lock); s_active_codes.clear(); if (SConfig::GetInstance().bEnableCheats) { s_active_codes.reserve(gcodes.size()); std::copy_if(gcodes.begin(), gcodes.end(), std::back_inserter(s_active_codes), [](const GeckoCode& code) { return code.enabled; }); } s_active_codes.shrink_to_fit(); s_code_handler_installed = Installation::Uninstalled; }
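// Editor's sketch (an assumption, not part of the file above): shrink_to_fit() is only a
// non-binding request to the implementation, so code that must guarantee the excess capacity is
// released sometimes uses the swap-with-temporary idiom instead. Helper name is illustrative.
#include <vector>

template <typename T>
void force_shrink(std::vector<T> &v)
{
    // The temporary copies v's elements (typically allocating exactly v.size() slots),
    // then swaps buffers; the old, oversized buffer is freed when the temporary dies.
    std::vector<T>(v.begin(), v.end()).swap(v);
}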
void clear() { // delete[] every Bucket we own; taking the element by reference also nulls the stored pointer for (auto &bucket : bucket_list) { if (nullptr != bucket) { delete[] bucket; bucket = nullptr; } } bucket_list.clear(); bucket_list.shrink_to_fit(); current_size = 0; }
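// Editor's sketch (hypothetical types, not the original container): holding the buckets as
// std::unique_ptr<Bucket[]> would make the manual delete[] loop above unnecessary, because
// clearing the vector runs the deleters automatically.
#include <cstddef>
#include <memory>
#include <vector>

struct Bucket { int data[16]; };

struct BucketTable {
    std::vector<std::unique_ptr<Bucket[]>> bucket_list;
    std::size_t current_size = 0;

    void clear()
    {
        bucket_list.clear();         // unique_ptr destructors call delete[] for us
        bucket_list.shrink_to_fit(); // then ask the vector to release its own storage
        current_size = 0;
    }
};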
void Terminate() { TwTerminate(); glDeleteBuffers(1, &Material::UBO); glDeleteBuffers(1, &PointLight::UBO); glDeleteBuffers(1, &g_Camera.UBO); g_Spheres.shrink_to_fit(); CleanMesh(g_SphereMesh); glDeleteTextures(3, g_Walls.textures); CleanMesh(g_WallMesh); g_BlinnPhongShader.Destroy(); g_AmbientShader.Destroy(); }
void algorithms::KDDecomposer<IROBOT>::get_unique_crn_cfgs(std::vector<const CONFIG*>& result, std::vector<CONFIG>& all_corners) const { std::clock_t t0 = std::clock(); for (const Cell<IROBOT>& cell : cells) { cell.get_corner_configs(all_corners); } all_corners.shrink_to_fit(); const CONFIG** temp = new const CONFIG*[all_corners.size()]; for (int i = 0; i < all_corners.size(); ++i) temp[i] = &all_corners[i]; std::sort(temp, temp + all_corners.size(), IROBOT::cmp); const CONFIG** end = unique( temp, temp + all_corners.size(), IROBOT::approximate); result.insert(result.begin(), temp, end); delete[] temp; std::clock_t t1 = std::clock(); }
void primeList(std::vector<unsigned int> &rvPrimes){ unsigned int loc = 0, sz = rvPrimes.size(); // 'loc' is current place in vector int pVal; #define D(i) C(loc, i, pVal) while (loc < sz){ if (rvPrimes[loc]){ pVal = rvPrimes[loc]; for (unsigned int i = 1; D(i) < sz; ++i) rvPrimes[D(i)] = 0; } loc++; } rvPrimes.erase(std::remove(std::begin(rvPrimes), std::end(rvPrimes), 0), std::end(rvPrimes)); //erase-remove idiom; removes all 0s rvPrimes.shrink_to_fit(); //free up a little memory by reducing capacity() down to actual size() return; }
void end_train(void) { size_t b = pointset[0].size(); B = b; size_t n = pointset.size(); dataset = create_dataset(pointset); pointset.clear(); pointset.shrink_to_fit(); if (r > 0) { r = n / r; // use r-fraction of the dataset for ordering. int* order = new int[B]; greedyorder(order, dataset, r, B, chunks); UINT8* new_dataset = new UINT8[n * B/8]; reorder(new_dataset, dataset, n, B, order); delete[] dataset; dataset = new_dataset; } ds = new mihasher(b, chunks); ds->populate(dataset, n, b/8); stats = new qstat[1]; }
void DisplayObject::computeMasksAndMatrix(DisplayObject* target, std::vector<IDrawable::MaskData>& masks, MATRIX& totalMatrix) const { const DisplayObject* cur=this; bool gatherMasks = true; while(cur && cur!=target) { totalMatrix=cur->getMatrix().multiplyMatrix(totalMatrix); //Get an IDrawable for all the hierarchy of each mask. if(gatherMasks) { if(cur->maskOf.isNull()) cur->gatherMaskIDrawables(masks); else { //Stop gathering masks if any level of the hierarchy it's a mask masks.clear(); masks.shrink_to_fit(); gatherMasks=false; } } cur=cur->getParent().getPtr(); } }
int main(int argc, char *argv[]) { try { LogPolicy::GetInstance().Unmute(); TIMER_START(preparing); TIMER_START(expansion); boost::filesystem::path config_file_path, input_path, restrictions_path, profile_path; unsigned int requested_num_threads; bool use_elevation; // declare a group of options that will be allowed only on command line boost::program_options::options_description generic_options("Options"); generic_options.add_options()("version,v", "Show version")("help,h", "Show this help message")( "config,c", boost::program_options::value<boost::filesystem::path>(&config_file_path) ->default_value("contractor.ini"), "Path to a configuration file."); // declare a group of options that will be allowed both on command line and in config file boost::program_options::options_description config_options("Configuration"); config_options.add_options()( "restrictions,r", boost::program_options::value<boost::filesystem::path>(&restrictions_path), "Restrictions file in .osrm.restrictions format")( "profile,p", boost::program_options::value<boost::filesystem::path>(&profile_path) ->default_value("profile.lua"),"Path to LUA routing profile")( "elevation,e", boost::program_options::value<bool>(&use_elevation)->default_value(true), "Process node elevations")( "threads,t", boost::program_options::value<unsigned int>(&requested_num_threads)->default_value(tbb::task_scheduler_init::default_num_threads()), "Number of threads to use"); // hidden options, will be allowed both on command line and in config file, but will not be // shown to the user boost::program_options::options_description hidden_options("Hidden options"); hidden_options.add_options()( "input,i", boost::program_options::value<boost::filesystem::path>(&input_path), "Input file in .osm, .osm.bz2 or .osm.pbf format"); // positional option boost::program_options::positional_options_description positional_options; positional_options.add("input", 1); // combine above options for parsing boost::program_options::options_description cmdline_options; cmdline_options.add(generic_options).add(config_options).add(hidden_options); boost::program_options::options_description config_file_options; config_file_options.add(config_options).add(hidden_options); boost::program_options::options_description visible_options( "Usage: " + boost::filesystem::basename(argv[0]) + " <input.osrm> [options]"); visible_options.add(generic_options).add(config_options); // parse command line options boost::program_options::variables_map option_variables; boost::program_options::store(boost::program_options::command_line_parser(argc, argv) .options(cmdline_options) .positional(positional_options) .run(), option_variables); if (option_variables.count("version")) { SimpleLogger().Write() << g_GIT_DESCRIPTION; return 0; } if (option_variables.count("help")) { SimpleLogger().Write() << "\n" << visible_options; return 0; } boost::program_options::notify(option_variables); if (!option_variables.count("restrictions")) { restrictions_path = std::string(input_path.string() + ".restrictions"); } if (!option_variables.count("input")) { SimpleLogger().Write() << "\n" << visible_options; return 0; } if (!boost::filesystem::is_regular_file(input_path)) { SimpleLogger().Write(logWARNING) << "Input file " << input_path.string() << " not found!"; return 1; } if (!boost::filesystem::is_regular_file(profile_path)) { SimpleLogger().Write(logWARNING) << "Profile " << profile_path.string() << " not found!"; return 1; } if (1 > requested_num_threads) { SimpleLogger().Write(logWARNING) << "Number of 
threads must be 1 or larger"; return 1; } const unsigned recommended_num_threads = tbb::task_scheduler_init::default_num_threads(); SimpleLogger().Write() << "Input file: " << input_path.filename().string(); SimpleLogger().Write() << "Restrictions file: " << restrictions_path.filename().string(); SimpleLogger().Write() << "Profile: " << profile_path.filename().string(); SimpleLogger().Write() << "Using elevation: " << use_elevation; SimpleLogger().Write() << "Threads: " << requested_num_threads; if (recommended_num_threads != requested_num_threads) { SimpleLogger().Write(logWARNING) << "The recommended number of threads is " << recommended_num_threads << "! This setting may have performance side-effects."; } tbb::task_scheduler_init init(requested_num_threads); LogPolicy::GetInstance().Unmute(); boost::filesystem::ifstream restriction_stream(restrictions_path, std::ios::binary); TurnRestriction restriction; FingerPrint fingerprint_loaded, fingerprint_orig; unsigned number_of_usable_restrictions = 0; restriction_stream.read((char *)&fingerprint_loaded, sizeof(FingerPrint)); if (!fingerprint_loaded.TestPrepare(fingerprint_orig)) { SimpleLogger().Write(logWARNING) << ".restrictions was prepared with different build.\n" "Reprocess to get rid of this warning."; } restriction_stream.read((char *)&number_of_usable_restrictions, sizeof(unsigned)); restriction_list.resize(number_of_usable_restrictions); if (number_of_usable_restrictions > 0) { restriction_stream.read((char *)&(restriction_list[0]), number_of_usable_restrictions * sizeof(TurnRestriction)); } restriction_stream.close(); boost::filesystem::ifstream in; in.open(input_path, std::ios::in | std::ios::binary); const std::string node_filename = input_path.string() + ".nodes"; const std::string edge_out = input_path.string() + ".edges"; const std::string geometry_filename = input_path.string() + ".geometry"; const std::string graphOut = input_path.string() + ".hsgr"; const std::string rtree_nodes_path = input_path.string() + ".ramIndex"; const std::string rtree_leafs_path = input_path.string() + ".fileIndex"; /*** Setup Scripting Environment ***/ // Create a new lua state lua_State *lua_state = luaL_newstate(); // Connect LuaBind to this lua state luabind::open(lua_state); // open utility libraries string library; luaL_openlibs(lua_state); // adjust lua load path luaAddScriptFolderToLoadPath(lua_state, profile_path.string().c_str()); // Now call our function in a lua script if (0 != luaL_dofile(lua_state, profile_path.string().c_str())) { std::cerr << lua_tostring(lua_state, -1) << " occurred in scripting block" << std::endl; return 1; } EdgeBasedGraphFactory::SpeedProfileProperties speed_profile; if (0 != luaL_dostring(lua_state, "return traffic_signal_penalty\n")) { std::cerr << lua_tostring(lua_state, -1) << " occurred in scripting block" << std::endl; return 1; } speed_profile.trafficSignalPenalty = 10 * lua_tointeger(lua_state, -1); SimpleLogger().Write(logDEBUG) << "traffic_signal_penalty: " << speed_profile.trafficSignalPenalty; if (0 != luaL_dostring(lua_state, "return u_turn_penalty\n")) { std::cerr << lua_tostring(lua_state, -1) << " occurred in scripting block" << std::endl; return 1; } speed_profile.uTurnPenalty = 10 * lua_tointeger(lua_state, -1); speed_profile.has_turn_penalty_function = lua_function_exists(lua_state, "turn_function"); #ifdef WIN32 #pragma message ("Memory consumption on Windows can be higher due to memory alignment") #else static_assert(sizeof(ImportEdge) == 20, "changing ImportEdge type has influence on memory 
consumption!"); #endif std::vector<ImportEdge> edge_list; NodeID number_of_node_based_nodes = readBinaryOSRMGraphFromStream(in, edge_list, barrier_node_list, traffic_light_list, &internal_to_external_node_map, restriction_list, use_elevation); in.close(); if (edge_list.empty()) { SimpleLogger().Write(logWARNING) << "The input data is empty, exiting."; return 1; } SimpleLogger().Write() << restriction_list.size() << " restrictions, " << barrier_node_list.size() << " bollard nodes, " << traffic_light_list.size() << " traffic lights"; /*** * Building an edge-expanded graph from node-based input and turn restrictions */ SimpleLogger().Write() << "Generating edge-expanded graph representation"; std::shared_ptr<NodeBasedDynamicGraph> node_based_graph = NodeBasedDynamicGraphFromImportEdges(number_of_node_based_nodes, edge_list); std::unique_ptr<RestrictionMap> restriction_map = std::unique_ptr<RestrictionMap>(new RestrictionMap(node_based_graph, restriction_list)); EdgeBasedGraphFactory *edge_based_graph_factor = new EdgeBasedGraphFactory(node_based_graph, std::move(restriction_map), barrier_node_list, traffic_light_list, internal_to_external_node_map, speed_profile); edge_list.clear(); edge_list.shrink_to_fit(); edge_based_graph_factor->Run(edge_out, geometry_filename, lua_state); restriction_list.clear(); restriction_list.shrink_to_fit(); barrier_node_list.clear(); barrier_node_list.shrink_to_fit(); traffic_light_list.clear(); traffic_light_list.shrink_to_fit(); unsigned number_of_edge_based_nodes = edge_based_graph_factor->GetNumberOfEdgeBasedNodes(); BOOST_ASSERT(number_of_edge_based_nodes != std::numeric_limits<unsigned>::max()); DeallocatingVector<EdgeBasedEdge> edgeBasedEdgeList; #ifndef WIN32 static_assert(sizeof(EdgeBasedEdge) == 16, "changing ImportEdge type has influence on memory consumption!"); #endif edge_based_graph_factor->GetEdgeBasedEdges(edgeBasedEdgeList); std::vector<EdgeBasedNode> node_based_edge_list; edge_based_graph_factor->GetEdgeBasedNodes(node_based_edge_list); delete edge_based_graph_factor; // TODO actually use scoping: Split this up in subfunctions node_based_graph.reset(); TIMER_STOP(expansion); // Building grid-like nearest-neighbor data structure SimpleLogger().Write() << "building r-tree ..."; StaticRTree<EdgeBasedNode> *rtree = new StaticRTree<EdgeBasedNode>(node_based_edge_list, rtree_nodes_path.c_str(), rtree_leafs_path.c_str(), internal_to_external_node_map); delete rtree; IteratorbasedCRC32<std::vector<EdgeBasedNode>> crc32; unsigned node_based_edge_list_CRC32 = crc32(node_based_edge_list.begin(), node_based_edge_list.end()); node_based_edge_list.clear(); node_based_edge_list.shrink_to_fit(); SimpleLogger().Write() << "CRC32: " << node_based_edge_list_CRC32; /*** * Writing info on original (node-based) nodes */ SimpleLogger().Write() << "writing node map ..."; boost::filesystem::ofstream node_stream(node_filename, std::ios::binary); const unsigned size_of_mapping = internal_to_external_node_map.size(); node_stream.write((char *)&size_of_mapping, sizeof(unsigned)); if (size_of_mapping > 0) { node_stream.write((char *)&(internal_to_external_node_map[0]), size_of_mapping * sizeof(NodeInfo)); } node_stream.close(); internal_to_external_node_map.clear(); internal_to_external_node_map.shrink_to_fit(); /*** * Contracting the edge-expanded graph */ SimpleLogger().Write() << "initializing contractor"; Contractor *contractor = new Contractor(number_of_edge_based_nodes, edgeBasedEdgeList); TIMER_START(contraction); contractor->Run(); TIMER_STOP(contraction); 
SimpleLogger().Write() << "Contraction took " << TIMER_SEC(contraction) << " sec"; DeallocatingVector<QueryEdge> contracted_edge_list; contractor->GetEdges(contracted_edge_list); delete contractor; /*** * Sorting contracted edges in a way that the static query graph can read parts of it in-place. */ std::sort(contracted_edge_list.begin(), contracted_edge_list.end()); unsigned max_used_node_id = 0; unsigned contracted_edge_count = contracted_edge_list.size(); SimpleLogger().Write() << "Serializing compacted graph of " << contracted_edge_count << " edges"; boost::filesystem::ofstream hsgr_output_stream(graphOut, std::ios::binary); hsgr_output_stream.write((char *)&fingerprint_orig, sizeof(FingerPrint)); for (const QueryEdge &edge : contracted_edge_list) { BOOST_ASSERT(UINT_MAX != edge.source); BOOST_ASSERT(UINT_MAX != edge.target); max_used_node_id = std::max(max_used_node_id, edge.source); max_used_node_id = std::max(max_used_node_id, edge.target); } SimpleLogger().Write(logDEBUG) << "input graph has " << number_of_edge_based_nodes << " nodes"; SimpleLogger().Write(logDEBUG) << "contracted graph has " << max_used_node_id << " nodes"; max_used_node_id += 1; std::vector<StaticGraph<EdgeData>::NodeArrayEntry> node_array; node_array.resize(number_of_edge_based_nodes + 1); SimpleLogger().Write() << "Building node array"; StaticGraph<EdgeData>::EdgeIterator edge = 0; StaticGraph<EdgeData>::EdgeIterator position = 0; StaticGraph<EdgeData>::EdgeIterator last_edge = edge; for (StaticGraph<EdgeData>::NodeIterator node = 0; node < max_used_node_id; ++node) { last_edge = edge; while ((edge < contracted_edge_count) && (contracted_edge_list[edge].source == node)) { ++edge; } node_array[node].first_edge = position; //=edge position += edge - last_edge; // remove } for (unsigned sentinel_counter = max_used_node_id; sentinel_counter != node_array.size(); ++sentinel_counter) { // sentinel element, guarded against underflow node_array[sentinel_counter].first_edge = contracted_edge_count; } unsigned node_array_size = node_array.size(); // serialize crc32, aka checksum hsgr_output_stream.write((char *)&node_based_edge_list_CRC32, sizeof(unsigned)); // serialize number of nodes hsgr_output_stream.write((char *)&node_array_size, sizeof(unsigned)); // serialize number of edges hsgr_output_stream.write((char *)&contracted_edge_count, sizeof(unsigned)); // serialize all nodes if (node_array_size > 0) { hsgr_output_stream.write((char *)&node_array[0], sizeof(StaticGraph<EdgeData>::NodeArrayEntry) * node_array_size); } // serialize all edges SimpleLogger().Write() << "Building edge array"; edge = 0; int number_of_used_edges = 0; StaticGraph<EdgeData>::EdgeArrayEntry current_edge; for (unsigned edge = 0; edge < contracted_edge_list.size(); ++edge) { // no self-loops BOOST_ASSERT(contracted_edge_list[edge].source != contracted_edge_list[edge].target); current_edge.target = contracted_edge_list[edge].target; current_edge.data = contracted_edge_list[edge].data; // every target needs to be valid BOOST_ASSERT(current_edge.target < max_used_node_id); #ifndef NDEBUG if (current_edge.data.distance <= 0) { SimpleLogger().Write(logWARNING) << "Edge: " << edge << ",source: " << contracted_edge_list[edge].source << ", target: " << contracted_edge_list[edge].target << ", dist: " << current_edge.data.distance; SimpleLogger().Write(logWARNING) << "Failed at adjacency list of node " << contracted_edge_list[edge].source << "/" << node_array.size() - 1; return 1; } #endif hsgr_output_stream.write((char *)&current_edge, 
sizeof(StaticGraph<EdgeData>::EdgeArrayEntry)); ++number_of_used_edges; } hsgr_output_stream.close(); TIMER_STOP(preparing); SimpleLogger().Write() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds"; SimpleLogger().Write() << "Expansion : " << (number_of_node_based_nodes / TIMER_SEC(expansion)) << " nodes/sec and " << (number_of_edge_based_nodes / TIMER_SEC(expansion)) << " edges/sec"; SimpleLogger().Write() << "Contraction: " << (number_of_edge_based_nodes / TIMER_SEC(contraction)) << " nodes/sec and " << number_of_used_edges / TIMER_SEC(contraction) << " edges/sec"; node_array.clear(); SimpleLogger().Write() << "finished preprocessing"; } catch (boost::program_options::too_many_positional_options_error &) { SimpleLogger().Write(logWARNING) << "Only one file can be specified"; return 1; } catch (boost::program_options::error &e) { SimpleLogger().Write(logWARNING) << e.what(); return 1; } catch (const std::exception &e) { SimpleLogger().Write(logWARNING) << "Exception occurred: " << e.what() << std::endl; return 1; } return 0; }
void Level::SimplifyGridPath(std::vector<PathFinder::SimpleNode>& path) { // This function will remove nodes from a grid path to simplify the path. // Nodes that are directly visible to each other (the level is not obstructing view) // can be removed up until two nodes that can't see each other directly any longer. // Nodes that are marked as impassable on the navigation grid are also not removed. // This is to prevent the avatar from getting stuck at a corner. if (path.size() < 3) { return; } std::vector<PathFinder::SimpleNode>::iterator it = path.begin(); while (it < (path.end() - 2)) { // Check the grid-based x and y distance between this node, and the next one. int dX; int dY; dX = (it + 1)->x - it->x; dY = (it + 1)->y - it->y; // If it is already a diagonal-based movement, we can skip the node if ((std::abs(dX) == 1) && (std::abs(dY) == 1)) { ++it; continue; } // If we get here, it is not a diagonal-based movement. // When the node afterwards, however, is a diagonal one, // and no corner is in the way, then we can erase the node in between dX = (it + 2)->x - it->x; dY = (it + 2)->y - it->y; if ((std::abs(dX) == 1) && (std::abs(dY) == 1) && m_NavigationGrid[it->y + dY][it->x] && m_NavigationGrid[it->y][it->x + dX]) { it = path.erase(it + 1); } else { ++it; } } // Recurring raycast parameters /*DOUBLE2 intersectionRef(0, 0); DOUBLE2 normalRef(0, 0); double levelFraction = 0; bool levelHit = false; size_t currentIndex = 0; std::vector<PathFinder::SimpleNode>::iterator it1 = path.begin(); while (it1 < (path.end() - 2)) { DOUBLE2 rayStart = Level::GridToWorld(*it1); std::vector<PathFinder::SimpleNode>::iterator it2 = it1 + 1; while (it2 != path.end()) { // First check whether the grid point is passable on the navigation grid if (!m_NavigationGrid[(*it2).y][(*it2).x]) { break; } // Check whether the level does not obstruct the path DOUBLE2 rayEnd = Level::GridToWorld(*it2); levelHit = m_ActorLayoutPtr->Raycast(rayStart, rayEnd, intersectionRef, normalRef, levelFraction); if (levelHit) { break; } ++it2; } // If a hit has been registered, then we can delete the nodes // between it1 and it2 (excluding both) if ((it2 - it1) > 1) { if (it2 == path.end()) { path.erase(it1 + 1, it2 - 1); } else { path.erase(it1 + 1, it2); } it1 = path.begin() + currentIndex; } if (path.size() < 3) { break; } ++it1; ++currentIndex; }*/ path.shrink_to_fit(); }
NodeID readBinaryOSRMGraphFromStream(std::istream &input_stream, std::vector<EdgeT> &edge_list, std::vector<NodeID> &barrier_node_list, std::vector<NodeID> &traffic_light_node_list, std::vector<QueryNode> *int_to_ext_node_id_map, std::vector<TurnRestriction> &restriction_list) { const FingerPrint fingerprint_orig; FingerPrint fingerprint_loaded; input_stream.read((char *)&fingerprint_loaded, sizeof(FingerPrint)); if (!fingerprint_loaded.TestGraphUtil(fingerprint_orig)) { SimpleLogger().Write(logWARNING) << ".osrm was prepared with different build.\n" "Reprocess to get rid of this warning."; } std::unordered_map<NodeID, NodeID> ext_to_int_id_map; NodeID n; input_stream.read((char *)&n, sizeof(NodeID)); SimpleLogger().Write() << "Importing n = " << n << " nodes "; ExternalMemoryNode current_node; for (NodeID i = 0; i < n; ++i) { input_stream.read((char *)&current_node, sizeof(ExternalMemoryNode)); int_to_ext_node_id_map->emplace_back(current_node.lat, current_node.lon, current_node.node_id); ext_to_int_id_map.emplace(current_node.node_id, i); if (current_node.barrier) { barrier_node_list.emplace_back(i); } if (current_node.traffic_lights) { traffic_light_node_list.emplace_back(i); } } // tighten vector sizes barrier_node_list.shrink_to_fit(); traffic_light_node_list.shrink_to_fit(); // renumber nodes in turn restrictions for (TurnRestriction &current_restriction : restriction_list) { auto internal_id_iter = ext_to_int_id_map.find(current_restriction.from.node); if (internal_id_iter == ext_to_int_id_map.end()) { SimpleLogger().Write(logDEBUG) << "Unmapped from Node of restriction"; continue; } current_restriction.from.node = internal_id_iter->second; internal_id_iter = ext_to_int_id_map.find(current_restriction.via.node); if (internal_id_iter == ext_to_int_id_map.end()) { SimpleLogger().Write(logDEBUG) << "Unmapped via node of restriction"; continue; } current_restriction.via.node = internal_id_iter->second; internal_id_iter = ext_to_int_id_map.find(current_restriction.to.node); if (internal_id_iter == ext_to_int_id_map.end()) { SimpleLogger().Write(logDEBUG) << "Unmapped to node of restriction"; continue; } current_restriction.to.node = internal_id_iter->second; } EdgeWeight weight; NodeID source, target; unsigned nameID; int length; short dir; // direction (0 = open in both directions, 1 = forward only, 2 = backward only) bool is_roundabout, ignore_in_grid, is_access_restricted, is_split; TravelMode travel_mode; EdgeID m; input_stream.read((char *)&m, sizeof(unsigned)); edge_list.reserve(m); SimpleLogger().Write() << " and " << m << " edges "; for (EdgeID i = 0; i < m; ++i) { input_stream.read((char *)&source, sizeof(unsigned)); input_stream.read((char *)&target, sizeof(unsigned)); input_stream.read((char *)&length, sizeof(int)); input_stream.read((char *)&dir, sizeof(short)); input_stream.read((char *)&weight, sizeof(int)); input_stream.read((char *)&nameID, sizeof(unsigned)); input_stream.read((char *)&is_roundabout, sizeof(bool)); input_stream.read((char *)&ignore_in_grid, sizeof(bool)); input_stream.read((char *)&is_access_restricted, sizeof(bool)); input_stream.read((char *)&travel_mode, sizeof(TravelMode)); input_stream.read((char *)&is_split, sizeof(bool)); BOOST_ASSERT_MSG(length > 0, "loaded null length edge"); BOOST_ASSERT_MSG(weight > 0, "loaded null weight"); BOOST_ASSERT_MSG(0 <= dir && dir <= 2, "loaded bogus direction"); bool forward = true; bool backward = true; if (1 == dir) { backward = false; } if (2 == dir) { forward = false; } // translate the external NodeIDs to internal IDs auto internal_id_iter = 
ext_to_int_id_map.find(source); if (ext_to_int_id_map.find(source) == ext_to_int_id_map.end()) { #ifndef NDEBUG SimpleLogger().Write(logWARNING) << " unresolved source NodeID: " << source; #endif continue; } source = internal_id_iter->second; internal_id_iter = ext_to_int_id_map.find(target); if (ext_to_int_id_map.find(target) == ext_to_int_id_map.end()) { #ifndef NDEBUG SimpleLogger().Write(logWARNING) << "unresolved target NodeID : " << target; #endif continue; } target = internal_id_iter->second; BOOST_ASSERT_MSG(source != SPECIAL_NODEID && target != SPECIAL_NODEID, "nonexisting source or target"); if (source > target) { std::swap(source, target); std::swap(forward, backward); } edge_list.emplace_back(source, target, nameID, weight, forward, backward, is_roundabout, ignore_in_grid, is_access_restricted, travel_mode, is_split); } ext_to_int_id_map.clear(); tbb::parallel_sort(edge_list.begin(), edge_list.end()); for (unsigned i = 1; i < edge_list.size(); ++i) { if ((edge_list[i - 1].target == edge_list[i].target) && (edge_list[i - 1].source == edge_list[i].source)) { const bool edge_flags_equivalent = (edge_list[i - 1].forward == edge_list[i].forward) && (edge_list[i - 1].backward == edge_list[i].backward); const bool edge_flags_are_superset1 = (edge_list[i - 1].forward && edge_list[i - 1].backward) && (edge_list[i].forward != edge_list[i].backward); const bool edge_flags_are_superset_2 = (edge_list[i].forward && edge_list[i].backward) && (edge_list[i - 1].forward != edge_list[i - 1].backward); if (edge_flags_equivalent) { edge_list[i].weight = std::min(edge_list[i - 1].weight, edge_list[i].weight); edge_list[i - 1].source = SPECIAL_NODEID; } else if (edge_flags_are_superset1) { if (edge_list[i - 1].weight <= edge_list[i].weight) { // edge i-1 is smaller and goes in both directions. Throw away the other edge edge_list[i].source = SPECIAL_NODEID; } else { // edge i-1 is open in both directions, but edge i is smaller in one direction. // Close edge i-1 in this direction edge_list[i - 1].forward = !edge_list[i].forward; edge_list[i - 1].backward = !edge_list[i].backward; } } else if (edge_flags_are_superset_2) { if (edge_list[i - 1].weight <= edge_list[i].weight) { // edge i-1 is smaller for one direction. edge i is open in both. close edge i // in the other direction edge_list[i].forward = !edge_list[i - 1].forward; edge_list[i].backward = !edge_list[i - 1].backward; } else { // edge i is smaller and goes in both direction. Throw away edge i-1 edge_list[i - 1].source = SPECIAL_NODEID; } } } } const auto new_end_iter = std::remove_if(edge_list.begin(), edge_list.end(), [] (const EdgeT &edge) { return edge.source == SPECIAL_NODEID || edge.target == SPECIAL_NODEID; }); edge_list.erase(new_end_iter, edge_list.end()); // remove excess candidates. edge_list.shrink_to_fit(); SimpleLogger().Write() << "Graph loaded ok and has " << edge_list.size() << " edges"; return n; }
void monfactions::finalize() { if( faction_list.empty() ) { debugmsg( "No monster factions found." ); return; } // Create a tree of faction dependence std::multimap< mfaction_id, mfaction_id > child_map; std::set< mfaction_id > unloaded; // To check if cycles exist std::queue< mfaction_id > queue; for( auto &faction : faction_list ) { unloaded.insert( faction.loadid ); if( faction.loadid == faction.base_faction ) { // No parent = root of the (a?) tree queue.push( faction.loadid ); continue; } // Point parent to children if( faction.base_faction >= 0 ) { child_map.insert( std::make_pair( faction.base_faction, faction.loadid ) ); } // Set faction as friendly to itself if not explicitly set to anything if( faction.attitude_map.count( faction.loadid ) == 0 ) { faction.attitude_map[faction.loadid] = MFA_FRIENDLY; } } if( queue.empty() && !faction_list.empty() ) { debugmsg( "No valid root monster faction!" ); return; } // Set uninitialized factions to be children of the root. // If more than one root exists, use the first one. const auto root = queue.front(); for( auto &faction : faction_list ) { if( faction.base_faction < 0 ) { faction.base_faction = root; // If it is the (new) root, connecting it to own parent (self) would create a cycle. // So only try to connect it to the parent if it isn't own parent. if( faction.base_faction != faction.loadid ) { child_map.insert( std::make_pair( faction.base_faction, faction.loadid ) ); } } } // Traverse the tree (breadth-first), starting from root while( !queue.empty() ) { mfaction_id cur = queue.front(); queue.pop(); if( unloaded.count( cur ) != 0 ) { unloaded.erase( cur ); } else { debugmsg( "Tried to load monster faction %s more than once", cur.obj().id.c_str() ); continue; } auto children = child_map.equal_range( cur ); for( auto &it = children.first; it != children.second; ++it ) { // Copy attributes to child apply_base_faction( cur, it->second ); queue.push( it->second ); } } // Bad json if( !unloaded.empty() ) { std::string names; for( auto &fac : unloaded ) { names.append( fac.id().str() ); names.append( " " ); auto &the_faction = faction_list[fac]; the_faction.base_faction = root; } debugmsg( "Cycle encountered when processing monster factions. Bad factions:\n %s", names.c_str() ); } faction_list.shrink_to_fit(); // Save a couple of bytes }
// Reads an ifstream and puts all chars into a vector void file_to_vector( std::vector<char> &vector, std::ifstream &myFile ) { vector.assign( ( std::istreambuf_iterator<char>( myFile ) ), std::istreambuf_iterator<char>() ); vector.shrink_to_fit(); }
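// Editor's usage sketch (hypothetical file name "data.bin"): file_to_vector expects an already
// opened stream; opening it in binary mode avoids newline translation, and istreambuf_iterator
// copies every byte without skipping whitespace.
#include <fstream>
#include <vector>

std::vector<char> load_file_bytes()
{
    std::ifstream myFile("data.bin", std::ios::binary);
    std::vector<char> bytes;
    if (myFile)
        file_to_vector(bytes, myFile);
    return bytes;
}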
void PacketLoader_Read_Callback(void* urlloader, int32_t result) { int64_t bytes_received, total_bytes_to_be_received; ppb_urlloader_interface->GetDownloadProgress((PP_Resource)urlloader, &bytes_received, &total_bytes_to_be_received); if (total_bytes_to_be_received > 0 && vecUrlLoaderBuf.capacity() < (size_t)total_bytes_to_be_received) vecUrlLoaderBuf.reserve((size_t)total_bytes_to_be_received); if (result != URLLOAD_BUFSIZE) vecUrlLoaderBuf.resize(vecUrlLoaderBuf.size() - URLLOAD_BUFSIZE + (result > 0 ? result : 0)); //ZL_LOG("PKGLOAD", "PacketLoader_Read_Callback - read: %d - bufsize = %d - total_bytes_to_be_received: %d - Total Bytes So Far: %d - bytes_received: %d", (int)result, (int)vecUrlLoaderBuf.size(), (int)total_bytes_to_be_received, vecUrlLoaderBuf.size(), (int)bytes_received); if (result > 0 && (total_bytes_to_be_received < 0 || (vecUrlLoaderBuf.size() < total_bytes_to_be_received))) { if (total_bytes_to_be_received > 0) { //PKP = Package Progress char msgmsg[1024]; uint32_t msgmsglen = sprintf(msgmsg, "PKP%d/%d", (int)bytes_received, (int)total_bytes_to_be_received); if (ppb_messaging_interface) ppb_messaging_interface->PostMessage(instance_, ppb_var_interface->VarFromUtf8(msgmsg, msgmsglen)); } vecUrlLoaderBuf.resize(vecUrlLoaderBuf.size() + URLLOAD_BUFSIZE); const PP_CompletionCallback cc = { &PacketLoader_Read_Callback, urlloader, 0 }; ppb_urlloader_interface->ReadResponseBody((PP_Resource)urlloader, &vecUrlLoaderBuf[vecUrlLoaderBuf.size() - URLLOAD_BUFSIZE], URLLOAD_BUFSIZE, cc); return; } int32_t status = ppb_urlresponseinfo_interface->GetProperty(ppb_urlloader_interface->GetResponseInfo((PP_Resource)urlloader), PP_URLRESPONSEPROPERTY_STATUSCODE).value.as_int; ppb_urlloader_interface->Close((PP_Resource)urlloader); if (status == 200 && vecUrlLoaderBuf.size() > 20) { #ifdef ZL_NACL_URL_LOADER_CAN_STILL_BE_GZ_COMPRESSED //this should not be required with recent Chrome/NACL/PNACL/Pepper versions if (vecUrlLoaderBuf[0] == 0x1F && (unsigned char)vecUrlLoaderBuf[1] == 0x8B && vecUrlLoaderBuf[2] == 8) { //ZL_LOG("PEXEGZ", "Found GZ header, decompressing ..."); std::vector<char> uncompr; uncompr.clear(); unsigned char *gzstart = (unsigned char*)&vecUrlLoaderBuf[0], *zstart = gzstart + 10; //after gz header if (vecUrlLoaderBuf[3] & 0x4) zstart += 2; //skip 2 bytes extra header if (vecUrlLoaderBuf[3] & 0x8) while (*(zstart++) != '\0'); //skip to end of zero terminated file name string if (vecUrlLoaderBuf[3] & 0x10) while (*(zstart++) != '\0'); //skip to end of zero terminated comment string if (vecUrlLoaderBuf[3] & 0x2) zstart += 2; //skip 2 bytes crc z_stream dec_stream; memset(&dec_stream, 0, sizeof(dec_stream)); dec_stream.avail_in = vecUrlLoaderBuf.size() - (zstart - gzstart); dec_stream.next_in = zstart; enum { UNCOMP_BUF_SIZE = 2048 }; size_t out_size = 0; for (int err = inflateInit2(&dec_stream, -MZ_DEFAULT_WINDOW_BITS); err == Z_OK; out_size += UNCOMP_BUF_SIZE - dec_stream.avail_out) { uncompr.resize(out_size + UNCOMP_BUF_SIZE); dec_stream.next_out = (unsigned char*)&uncompr[out_size]; dec_stream.avail_out = UNCOMP_BUF_SIZE; err = inflate(&dec_stream, Z_NO_FLUSH); } uncompr.resize(out_size); inflateEnd(&dec_stream); //ZL_LOG("PEXEGZ", "Decompressed %d bytes to %d bytes", vecUrlLoaderBuf.size(), uncompr.size()); vecUrlLoaderBuf.swap(uncompr); } #endif uint32_t size_before_zip = 0, loaded_size = (uint32_t)vecUrlLoaderBuf.size(); char* pPEXEBegin = &*vecUrlLoaderBuf.begin(), *pZipHdr = pPEXEBegin + loaded_size - 20; for (; pZipHdr > pPEXEBegin; pZipHdr--) if (pZipHdr[0] == 
0x50 && pZipHdr[1] == 0x4b && pZipHdr[2] == 0x05 && pZipHdr[3] == 0x06) break; if (pZipHdr > pPEXEBegin) { uint32_t size_central_dir = *(uint32_t*)&pZipHdr[12]; uint32_t offset_central_dir = *(uint32_t*)&pZipHdr[16]; size_before_zip = (uint32_t)(pZipHdr - pPEXEBegin) - (offset_central_dir + size_central_dir); if (size_central_dir > loaded_size || offset_central_dir > loaded_size || size_before_zip > loaded_size) size_before_zip = 0; //ZL_LOG("PEXEZIP", "central_pos: %u - size_central_dir: %u - offset_central_dir: %u - size_before_zip: %u", (uint32_t)(pZipHdr - pPEXEBegin), size_central_dir, offset_central_dir, size_before_zip); } if (size_before_zip > (uint32_t)(loaded_size/16)) //if size before start of asset zip archive is more than one 16th of the entire file, deallocate that part { //ZL_LOG("PEXEZIP", "Shrink by %u - VecSize %u -> %u (Capacity Before: %u)", size_before_zip, (uint32_t)loaded_size, (uint32_t)loaded_size - size_before_zip, (uint32_t)vecUrlLoaderBuf.capacity()); vecUrlLoaderBuf.erase(vecUrlLoaderBuf.begin(), vecUrlLoaderBuf.begin() + size_before_zip); vecUrlLoaderBuf.shrink_to_fit(); //ZL_LOG("PEXEZIP", "(Capacity After: %u)", (uint32_t)vecUrlLoaderBuf.capacity()); } } if (status != 200) { //PKE = Package Error if (ppb_messaging_interface) ppb_messaging_interface->PostMessage(instance_, ppb_var_interface->VarFromUtf8("PKE", 3)); return; } ZL_File::DefaultReadFileContainer = ZL_FileContainer_ZIP(ZL_File((void*)(vecUrlLoaderBuf.empty() ? NULL : &vecUrlLoaderBuf[0]), vecUrlLoaderBuf.size())); //PKD = Package Done if (ppb_messaging_interface) ppb_messaging_interface->PostMessage(instance_, ppb_var_interface->VarFromUtf8("PKD", 3)); }
inline static void clean() { v.clear(); v.shrink_to_fit(); }
int main(int argc, char *argv[]) { LogPolicy::GetInstance().Unmute(); try { // enable logging if (argc < 3) { SimpleLogger().Write(logWARNING) << "usage:\n" << argv[0] << " <osrm> <osrm.restrictions>"; return -1; } SimpleLogger().Write() << "Using restrictions from file: " << argv[2]; std::ifstream restriction_ifstream(argv[2], std::ios::binary); const FingerPrint fingerprint_orig; FingerPrint fingerprint_loaded; restriction_ifstream.read((char *)&fingerprint_loaded, sizeof(FingerPrint)); // check fingerprint and warn if necessary if (!fingerprint_loaded.TestGraphUtil(fingerprint_orig)) { SimpleLogger().Write(logWARNING) << argv[2] << " was prepared with a different build. " "Reprocess to get rid of this warning."; } if (!restriction_ifstream.good()) { throw osrm::exception("Could not access <osrm-restrictions> files"); } uint32_t usable_restrictions = 0; restriction_ifstream.read((char *)&usable_restrictions, sizeof(uint32_t)); restriction_list.resize(usable_restrictions); // load restrictions if (usable_restrictions > 0) { restriction_ifstream.read((char *)&(restriction_list[0]), usable_restrictions * sizeof(TurnRestriction)); } restriction_ifstream.close(); std::ifstream input_stream(argv[1], std::ifstream::in | std::ifstream::binary); if (!input_stream.is_open()) { throw osrm::exception("Cannot open osrm file"); } // load graph data std::vector<ImportEdge> edge_list; const NodeID number_of_nodes = readBinaryOSRMGraphFromStream(input_stream, edge_list, bollard_node_list, traffic_lights_list, &coordinate_list, restriction_list); input_stream.close(); BOOST_ASSERT_MSG(restriction_list.size() == usable_restrictions, "size of restriction_list changed"); SimpleLogger().Write() << restriction_list.size() << " restrictions, " << bollard_node_list.size() << " bollard nodes, " << traffic_lights_list.size() << " traffic lights"; traffic_lights_list.clear(); traffic_lights_list.shrink_to_fit(); // Building an node-based graph DeallocatingVector<TarjanEdge> graph_edge_list; for (const NodeBasedEdge &input_edge : edge_list) { if (input_edge.source == input_edge.target) { continue; } if (input_edge.forward) { graph_edge_list.emplace_back(input_edge.source, input_edge.target, (std::max)((int)input_edge.weight, 1), input_edge.name_id); } if (input_edge.backward) { graph_edge_list.emplace_back(input_edge.target, input_edge.source, (std::max)((int)input_edge.weight, 1), input_edge.name_id); } } edge_list.clear(); edge_list.shrink_to_fit(); BOOST_ASSERT_MSG(0 == edge_list.size() && 0 == edge_list.capacity(), "input edge vector not properly deallocated"); tbb::parallel_sort(graph_edge_list.begin(), graph_edge_list.end()); auto graph = std::make_shared<TarjanDynamicGraph>(number_of_nodes, graph_edge_list); edge_list.clear(); edge_list.shrink_to_fit(); SimpleLogger().Write() << "Starting SCC graph traversal"; RestrictionMap restriction_map(restriction_list); auto tarjan = osrm::make_unique<TarjanSCC<TarjanDynamicGraph>>(graph, restriction_map, bollard_node_list); tarjan->run(); SimpleLogger().Write() << "identified: " << tarjan->get_number_of_components() << " many components"; SimpleLogger().Write() << "identified " << tarjan->get_size_one_count() << " SCCs of size 1"; // output TIMER_START(SCC_RUN_SETUP); // remove files from previous run if exist DeleteFileIfExists("component.dbf"); DeleteFileIfExists("component.shx"); DeleteFileIfExists("component.shp"); Percent p(graph->GetNumberOfNodes()); OGRRegisterAll(); const char *pszDriverName = "ESRI Shapefile"; OGRSFDriver *poDriver = 
OGRSFDriverRegistrar::GetRegistrar()->GetDriverByName(pszDriverName); if (nullptr == poDriver) { throw osrm::exception("ESRI Shapefile driver not available"); } OGRDataSource *poDS = poDriver->CreateDataSource("component.shp", nullptr); if (nullptr == poDS) { throw osrm::exception("Creation of output file failed"); } OGRSpatialReference *poSRS = new OGRSpatialReference(); poSRS->importFromEPSG(4326); OGRLayer *poLayer = poDS->CreateLayer("component", poSRS, wkbLineString, nullptr); if (nullptr == poLayer) { throw osrm::exception("Layer creation failed."); } TIMER_STOP(SCC_RUN_SETUP); SimpleLogger().Write() << "shapefile setup took " << TIMER_MSEC(SCC_RUN_SETUP)/1000. << "s"; uint64_t total_network_distance = 0; p.reinit(graph->GetNumberOfNodes()); TIMER_START(SCC_OUTPUT); for (const NodeID source : osrm::irange(0u, graph->GetNumberOfNodes())) { p.printIncrement(); for (const auto current_edge : graph->GetAdjacentEdgeRange(source)) { const TarjanDynamicGraph::NodeIterator target = graph->GetTarget(current_edge); if (source < target || graph->EndEdges(target) == graph->FindEdge(target, source)) { total_network_distance += 100 * FixedPointCoordinate::ApproximateEuclideanDistance( coordinate_list[source].lat, coordinate_list[source].lon, coordinate_list[target].lat, coordinate_list[target].lon); BOOST_ASSERT(current_edge != SPECIAL_EDGEID); BOOST_ASSERT(source != SPECIAL_NODEID); BOOST_ASSERT(target != SPECIAL_NODEID); const unsigned size_of_containing_component = std::min(tarjan->get_component_size(source), tarjan->get_component_size(target)); // edges that end on bollard nodes may actually be in two distinct components if (size_of_containing_component < 1000) { OGRLineString lineString; lineString.addPoint(coordinate_list[source].lon / COORDINATE_PRECISION, coordinate_list[source].lat / COORDINATE_PRECISION); lineString.addPoint(coordinate_list[target].lon / COORDINATE_PRECISION, coordinate_list[target].lat / COORDINATE_PRECISION); OGRFeature *poFeature = OGRFeature::CreateFeature(poLayer->GetLayerDefn()); poFeature->SetGeometry(&lineString); if (OGRERR_NONE != poLayer->CreateFeature(poFeature)) { throw osrm::exception("Failed to create feature in shapefile."); } OGRFeature::DestroyFeature(poFeature); } } } } OGRSpatialReference::DestroySpatialReference(poSRS); OGRDataSource::DestroyDataSource(poDS); TIMER_STOP(SCC_OUTPUT); SimpleLogger().Write() << "generating output took: " << TIMER_MSEC(SCC_OUTPUT)/1000. << "s"; SimpleLogger().Write() << "total network distance: " << (uint64_t)total_network_distance / 100 / 1000. << " km"; SimpleLogger().Write() << "finished component analysis"; } catch (const std::exception &e) { SimpleLogger().Write(logWARNING) << "[exception] " << e.what(); } return 0; }
////////////////////////////////// // Function to parse the memory access trace (input) ////////////////////////////////// Dim3 read_file(std::vector<Thread> &threads, const std::string kernelname, const std::string benchname, const std::string suitename) { unsigned num_threads = 0; unsigned num_accesses = 0; std::string filename = trace_dir+"/"+suitename+"/"+benchname+"/"+kernelname+".trc"; // Test if the file exists, return if it does not exist std::ifstream exists_file(filename); if (!exists_file) { return Dim3({0,0,0}); } // Open the file for reading std::cout << SPLIT_STRING << std::endl; message(""); std::cout << "### Reading the trace file for '" << kernelname << "'..."; std::ifstream input_file(filename); // First get the blocksize from the trace file std::string temp_string; Dim3 blockdim; input_file >> temp_string >> blockdim.x >> blockdim.y >> blockdim.z; // Then proceed to the actual trace data unsigned thread, direction, bytes; unsigned long address; unsigned pc, block; while (input_file >> thread >> address >> bytes >> pc >> block) { direction = 0; // Consider only loads (stores are not cached in Fermi's L1 caches) if (direction == 0) { // Count the number of accesses and threads num_accesses++; num_threads = (num_threads > thread) ? num_threads : thread + 1; // Store the data in the Thread class Access access = { direction, address, 1, bytes, address+bytes-1, pc, block }; if (access.address == 0) access.width = 0; threads[thread].append_access(access); } } std::cout << "done" << std::endl; // Test if the file actually contained memory accesses - exit otherwise if (!(num_accesses > 0 && num_threads > 0)) { std::cout << "### Error: '" << filename << "' is not a valid memory access trace" << std::endl; message(""); return Dim3({0,0,0}); } // Reduce the size of the threads vector threads.resize(num_threads); threads.shrink_to_fit(); // Print additional information and return the threadblock dimensions std::cout << "### Blocksize: (" << blockdim.x << "," << blockdim.y << "," << blockdim.z << ")" << std::endl; std::cout << "### Total threads: " << num_threads << std::endl; std::cout << "### Total memory accesses: " << num_accesses << "" << std::endl; return blockdim; }