/**
 * Creates the nodes in the edge expanded graph from edges in the node-based graph.
 *
 * Runs an SCC pass over the (compressed) node-based graph first so every
 * edge-based node can be tagged with the id of a "tiny" component (< 1000
 * nodes); larger components are tagged 0.
 */
void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes()
{
    SimpleLogger().Write() << "Identifying components of the (compressed) road network";

    // Run a BFS on the undirected graph and identify small components
    TarjanSCC<NodeBasedDynamicGraph> component_explorer(
        m_node_based_graph, *m_restriction_map, m_barrier_nodes);
    component_explorer.run();

    SimpleLogger().Write() << "identified: "
                           << component_explorer.get_number_of_components() - removed_node_count
                           << " (compressed) components";
    SimpleLogger().Write() << "identified "
                           << component_explorer.get_size_one_count() - removed_node_count
                           << " (compressed) SCCs of size 1";
    SimpleLogger().Write() << "generating edge-expanded nodes";

    Percent progress(m_node_based_graph->GetNumberOfNodes());

    // loop over all edges and generate new set of nodes
    for (const auto node_u : osrm::irange(0u, m_node_based_graph->GetNumberOfNodes()))
    {
        BOOST_ASSERT(node_u != SPECIAL_NODEID);
        BOOST_ASSERT(node_u < m_node_based_graph->GetNumberOfNodes());
        progress.printStatus(node_u);
        for (EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(node_u))
        {
            const EdgeData &edge_data = m_node_based_graph->GetEdgeData(e1);
            BOOST_ASSERT(e1 != SPECIAL_EDGEID);
            const NodeID node_v = m_node_based_graph->GetTarget(e1);
            BOOST_ASSERT(SPECIAL_NODEID != node_v);

            // pick only every other edge, since we have every edge as an outgoing
            // and incoming edge
            if (node_u > node_v)
            {
                continue;
            }

            BOOST_ASSERT(node_u < node_v);

            // Note: edges that end on barrier nodes or on a turn restriction
            // may actually be in two distinct components. We choose the smallest
            const unsigned size_of_component =
                std::min(component_explorer.get_component_size(node_u),
                         component_explorer.get_component_size(node_v));

            // id of whichever endpoint's component is smaller (immediately-invoked lambda)
            const unsigned id_of_smaller_component = [node_u, node_v, &component_explorer] {
                if (component_explorer.get_component_size(node_u) <
                    component_explorer.get_component_size(node_v))
                {
                    return component_explorer.get_component_id(node_u);
                }
                return component_explorer.get_component_id(node_v);
            }();

            const bool component_is_tiny = size_of_component < 1000;

            // we only set edgeBasedNodeID for forward edges; the stored component id
            // is shifted by +1 so that 0 can mean "not a tiny component"
            if (edge_data.edgeBasedNodeID == SPECIAL_NODEID)
            {
                InsertEdgeBasedNode(node_v, node_u,
                                    (component_is_tiny ? id_of_smaller_component + 1 : 0));
            }
            else
            {
                InsertEdgeBasedNode(node_u, node_v,
                                    (component_is_tiny ? id_of_smaller_component + 1 : 0));
            }
        }
    }

    SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size()
                           << " nodes in edge-expanded graph";
}
/// ASCII-only ispunct replacement: nonzero iff `ch` carries the punctuation
/// class bit in the static classification table. `ch` must be 7-bit ASCII.
static int ispunct(int ch)
{
    BOOST_ASSERT(isascii_(ch));
    return ascii_char_types[ch] & BOOST_CC_PUNCT;
}
/// ASCII-only isupper replacement: nonzero iff `ch` carries the upper-case
/// class bit in the static classification table. `ch` must be 7-bit ASCII.
static int isupper(int ch)
{
    BOOST_ASSERT(isascii_(ch));
    return ascii_char_types[ch] & BOOST_CC_UPPER;
}
/// ASCII-only isalpha replacement: nonzero iff `ch` carries the alphabetic
/// class bit in the static classification table. `ch` must be 7-bit ASCII.
static int isalpha(int ch)
{
    BOOST_ASSERT(isascii_(ch));
    return ascii_char_types[ch] & BOOST_CC_ALPHA;
}
/// ASCII-only iscntrl replacement: nonzero iff `ch` carries the control-char
/// class bit in the static classification table. `ch` must be 7-bit ASCII.
static int iscntrl(int ch)
{
    BOOST_ASSERT(isascii_(ch));
    return ascii_char_types[ch] & BOOST_CC_CTRL;
}
/// Synchronous client wrapper: forwards `recompute` to the remote component
/// addressed by gid_. Requires a valid gid_.
void recompute(std::vector<hpx::naming::id_type> const& search_objects)
{
    BOOST_ASSERT(gid_);
    this->base_type::recompute(gid_, search_objects);
}
// Builds the inspector page for a WaterUseEquipmentDefinition: a grid of
// label/editor rows (name, subcategory, peak flow rate, three schedule drop
// zones) placed on a widget that is pushed onto the view's stacked widget.
// `isIP` selects IP vs SI display units for quantity editors.
WaterUseEquipmentDefinitionInspectorView::WaterUseEquipmentDefinitionInspectorView(bool isIP, const openstudio::model::Model& model, QWidget * parent)
  : ModelObjectInspectorView(model, true, parent)
{
  m_isIP = isIP;

  bool isConnected = false;

  // page widget hosted by the base class's stacked widget
  QWidget* visibleWidget = new QWidget();
  this->stackedWidget()->addWidget(visibleWidget);

  QGridLayout* mainGridLayout = new QGridLayout();
  mainGridLayout->setContentsMargins(7,7,7,7);
  mainGridLayout->setSpacing(14);
  visibleWidget->setLayout(mainGridLayout);

  // Name
  QLabel* label = new QLabel("Name: ");
  label->setObjectName("H2");
  mainGridLayout->addWidget(label,0,0);

  m_nameEdit = new OSLineEdit();
  mainGridLayout->addWidget(m_nameEdit,1,0,1,3);

  // End Use Subcategory
  label = new QLabel("End Use Subcategory: ");
  label->setObjectName("H2");
  mainGridLayout->addWidget(label,2,0);

  m_endUseSubcategoryEdit = new OSLineEdit();
  mainGridLayout->addWidget(m_endUseSubcategoryEdit,3,0,1,3);

  // Peak Flow Rate (quantity editor tracks the global unit-system toggle)
  label = new QLabel("Peak Flow Rate: ");
  label->setObjectName("H2");
  mainGridLayout->addWidget(label,4,0);

  m_peakFlowRateEdit = new OSQuantityEdit(m_isIP);
  isConnected = connect(this, SIGNAL(toggleUnitsClicked(bool)), m_peakFlowRateEdit, SLOT(onUnitSystemChange(bool)));
  BOOST_ASSERT(isConnected);
  mainGridLayout->addWidget(m_peakFlowRateEdit,5,0,1,3);

  // Target Temperature Schedule (single-item drop zone)
  label = new QLabel("Target Temperature Schedule: ");
  label->setObjectName("H2");
  mainGridLayout->addWidget(label,6,0);

  m_targetTemperatureScheduleVC = new TargetTemperatureScheduleVC();
  m_targetTemperatureScheduleDZ = new OSDropZone(m_targetTemperatureScheduleVC);
  m_targetTemperatureScheduleDZ->setMaxItems(1);
  mainGridLayout->addWidget(m_targetTemperatureScheduleDZ,7,0,1,3);

  // Sensible Fraction Schedule (single-item drop zone)
  label = new QLabel("Sensible Fraction Schedule: ");
  label->setObjectName("H2");
  mainGridLayout->addWidget(label,8,0);

  m_sensibleFractionScheduleVC = new SensibleFractionScheduleVC();
  m_sensibleFractionScheduleDZ = new OSDropZone(m_sensibleFractionScheduleVC);
  m_sensibleFractionScheduleDZ->setMaxItems(1);
  mainGridLayout->addWidget(m_sensibleFractionScheduleDZ,9,0,1,3);

  // Latent Fraction Schedule (single-item drop zone)
  label = new QLabel("Latent Fraction Schedule: ");
  label->setObjectName("H2");
  mainGridLayout->addWidget(label,10,0);

  m_latentFractionScheduleVC = new LatentFractionScheduleVC();
  m_latentFractionScheduleDZ = new OSDropZone(m_latentFractionScheduleVC);
  m_latentFractionScheduleDZ->setMaxItems(1);
  mainGridLayout->addWidget(m_latentFractionScheduleDZ,11,0,1,3);

  // Stretch: absorb leftover space below row 11 and right of column 2
  mainGridLayout->setRowStretch(12,100);
  mainGridLayout->setColumnStretch(3,100);
}
/// Asynchronous client wrapper: launches `enforce` on the remote component
/// addressed by gid_ and returns the future. Requires a valid gid_.
lcos::future<void> enforce_async(std::vector<hpx::naming::id_type> const& master_gids,double dt,
                                 std::size_t n,std::size_t N)
{
    BOOST_ASSERT(gid_);
    return this->base_type::enforce_async(gid_,master_gids,dt,n,N);
}
/// Synchronous client wrapper: forwards `move` to the remote component
/// addressed by gid_. Requires a valid gid_.
void move(double dt,double time)
{
    BOOST_ASSERT(gid_);
    this->base_type::move(gid_,dt,time);
}
/// Asynchronous client wrapper: launches `move` on the remote component
/// addressed by gid_ and returns the future. Requires a valid gid_.
lcos::future<void> move_async(double dt,double time)
{
    BOOST_ASSERT(gid_);
    return this->base_type::move_async(gid_,dt,time);
}
/// Asynchronous client wrapper: launches `adjust` on the remote component
/// addressed by gid_ and returns the future. Requires a valid gid_.
lcos::future<void> adjust_async(double dt)
{
    BOOST_ASSERT(gid_);
    return this->base_type::adjust_async(gid_,dt);
}
/// Synchronous client wrapper: fetches the polygon from the remote component
/// addressed by gid_. Requires a valid gid_.
polygon_type get_poly() const
{
    BOOST_ASSERT(gid_);
    return this->base_type::get_poly(gid_);
}
// Merges the forward edge (u,v) and reverse edge (v,u) of the node-based graph
// into EdgeBasedNode entries appended to m_edge_based_node_list. If the edge
// pair carries compressed geometry, one entry is emitted per geometry segment
// with per-segment weights and distance prefix sums; otherwise a single
// uncompressed entry is emitted. `component_id` tags the entries with the
// (shifted) id of a tiny SCC, or 0.
void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u,
                                                const NodeID node_v,
                                                const unsigned component_id)
{
    // merge edges together into one EdgeBasedNode
    BOOST_ASSERT(node_u != SPECIAL_NODEID);
    BOOST_ASSERT(node_v != SPECIAL_NODEID);

    // find forward edge id and data
    const EdgeID edge_id_1 = m_node_based_graph->FindEdge(node_u, node_v);
    BOOST_ASSERT(edge_id_1 != SPECIAL_EDGEID);
    const EdgeData &forward_data = m_node_based_graph->GetEdgeData(edge_id_1);

    // find reverse edge id and data
    const EdgeID edge_id_2 = m_node_based_graph->FindEdge(node_v, node_u);
    BOOST_ASSERT(edge_id_2 != SPECIAL_EDGEID);
    const EdgeData &reverse_data = m_node_based_graph->GetEdgeData(edge_id_2);

    // neither direction got an edge-based node id assigned -> nothing to emit
    if (forward_data.edgeBasedNodeID == SPECIAL_NODEID &&
        reverse_data.edgeBasedNodeID == SPECIAL_NODEID)
    {
        return;
    }

    // both directions must agree on whether they carry compressed geometry
    BOOST_ASSERT(m_geometry_compressor.HasEntryForID(edge_id_1) ==
                 m_geometry_compressor.HasEntryForID(edge_id_2));
    if (m_geometry_compressor.HasEntryForID(edge_id_1))
    {
        BOOST_ASSERT(m_geometry_compressor.HasEntryForID(edge_id_2));

        // reconstruct geometry and put in each individual edge with its offset
        const std::vector<GeometryCompressor::CompressedNode> &forward_geometry =
            m_geometry_compressor.GetBucketReference(edge_id_1);
        const std::vector<GeometryCompressor::CompressedNode> &reverse_geometry =
            m_geometry_compressor.GetBucketReference(edge_id_2);
        BOOST_ASSERT(forward_geometry.size() == reverse_geometry.size());
        BOOST_ASSERT(0 != forward_geometry.size());
        const unsigned geometry_size = static_cast<unsigned>(forward_geometry.size());
        BOOST_ASSERT(geometry_size > 1);

        // reconstruct bidirectional edge with individual weights and put each into the NN index
        std::vector<int> forward_dist_prefix_sum(forward_geometry.size(), 0);
        std::vector<int> reverse_dist_prefix_sum(reverse_geometry.size(), 0);

        // quick'n'dirty prefix sum as std::partial_sum needs additional casts
        // TODO: move to lambda function with C++11
        int temp_sum = 0;

        // forward prefix sums: distance from node_u up to (not including) segment i
        for (const auto i : osrm::irange(0u, geometry_size))
        {
            forward_dist_prefix_sum[i] = temp_sum;
            temp_sum += forward_geometry[i].second;
            BOOST_ASSERT(forward_data.distance >= temp_sum);
        }

        // reverse prefix sums: walk the reverse geometry from its far end so the
        // sums line up index-for-index with the forward traversal
        temp_sum = 0;
        for (const auto i : osrm::irange(0u, geometry_size))
        {
            temp_sum += reverse_geometry[reverse_geometry.size() - 1 - i].second;
            reverse_dist_prefix_sum[i] = reverse_data.distance - temp_sum;
            // BOOST_ASSERT(reverse_data.distance >= temp_sum);
        }

        NodeID current_edge_source_coordinate_id = node_u;

        // keep track of the largest edge-based node id seen so far
        if (SPECIAL_NODEID != forward_data.edgeBasedNodeID)
        {
            max_id = std::max(forward_data.edgeBasedNodeID, max_id);
        }
        if (SPECIAL_NODEID != reverse_data.edgeBasedNodeID)
        {
            max_id = std::max(reverse_data.edgeBasedNodeID, max_id);
        }

        // traverse arrays from start and end respectively
        for (const auto i : osrm::irange(0u, geometry_size))
        {
            BOOST_ASSERT(current_edge_source_coordinate_id ==
                         reverse_geometry[geometry_size - 1 - i].first);
            const NodeID current_edge_target_coordinate_id = forward_geometry[i].first;
            BOOST_ASSERT(current_edge_target_coordinate_id != current_edge_source_coordinate_id);

            // build edges
            m_edge_based_node_list.emplace_back(
                forward_data.edgeBasedNodeID, reverse_data.edgeBasedNodeID,
                current_edge_source_coordinate_id, current_edge_target_coordinate_id,
                forward_data.nameID, forward_geometry[i].second,
                reverse_geometry[geometry_size - 1 - i].second, forward_dist_prefix_sum[i],
                reverse_dist_prefix_sum[i], m_geometry_compressor.GetPositionForID(edge_id_1),
                component_id, i, forward_data.travel_mode, reverse_data.travel_mode);

            current_edge_source_coordinate_id = current_edge_target_coordinate_id;

            BOOST_ASSERT(m_edge_based_node_list.back().IsCompressed());
            BOOST_ASSERT(node_u != m_edge_based_node_list.back().u ||
                         node_v != m_edge_based_node_list.back().v);
            BOOST_ASSERT(node_u != m_edge_based_node_list.back().v ||
                         node_v != m_edge_based_node_list.back().u);
        }

        // the segment walk must end exactly at node_v
        BOOST_ASSERT(current_edge_source_coordinate_id == node_v);
        BOOST_ASSERT(m_edge_based_node_list.back().IsCompressed());
    }
    else
    {
        // uncompressed case: a plain (u,v) edge pair
        BOOST_ASSERT(!m_geometry_compressor.HasEntryForID(edge_id_2));

        // a direction has an edge-based node id exactly when it is a forward edge
        if (forward_data.edgeBasedNodeID != SPECIAL_NODEID)
        {
            BOOST_ASSERT(forward_data.forward);
        }
        else
        {
            BOOST_ASSERT(!forward_data.forward);
        }

        if (reverse_data.edgeBasedNodeID != SPECIAL_NODEID)
        {
            BOOST_ASSERT(reverse_data.forward);
        }
        else
        {
            BOOST_ASSERT(!reverse_data.forward);
        }

        BOOST_ASSERT(forward_data.edgeBasedNodeID != SPECIAL_NODEID ||
                     reverse_data.edgeBasedNodeID != SPECIAL_NODEID);

        m_edge_based_node_list.emplace_back(
            forward_data.edgeBasedNodeID, reverse_data.edgeBasedNodeID, node_u, node_v,
            forward_data.nameID, forward_data.distance, reverse_data.distance, 0, 0,
            SPECIAL_EDGEID, component_id, 0, forward_data.travel_mode, reverse_data.travel_mode);
        BOOST_ASSERT(!m_edge_based_node_list.back().IsCompressed());
    }
}
/**
 * Generates the edges of the edge-expanded graph, one per permitted turn
 * (u -> v -> w) in the node-based graph, applying turn/traffic-signal/U-turn
 * penalties. It also generates OriginalEdgeData records and serializes them
 * to `original_edge_data_filename` (count written as a placeholder first and
 * patched at the end).
 */
void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
    const std::string &original_edge_data_filename, lua_State *lua_state)
{
    SimpleLogger().Write() << "generating edge-expanded edges";

    unsigned node_based_edge_counter = 0;
    unsigned original_edges_counter = 0;

    std::ofstream edge_data_file(original_edge_data_filename.c_str(), std::ios::binary);

    // writes a dummy value that is updated later
    edge_data_file.write((char *)&original_edges_counter, sizeof(unsigned));

    std::vector<OriginalEdgeData> original_edge_data_vector;
    original_edge_data_vector.reserve(1024 * 1024);

    // Loop over all turns and generate new set of edges.
    // Three nested loop look super-linear, but we are dealing with a (kind of)
    // linear number of turns only.
    unsigned restricted_turns_counter = 0;
    unsigned skipped_uturns_counter = 0;
    unsigned skipped_barrier_turns_counter = 0;
    unsigned compressed = 0;

    Percent progress(m_node_based_graph->GetNumberOfNodes());

    for (const auto node_u : osrm::irange(0u, m_node_based_graph->GetNumberOfNodes()))
    {
        progress.printStatus(node_u);
        for (const EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(node_u))
        {
            if (!m_node_based_graph->GetEdgeData(e1).forward)
            {
                continue;
            }

            ++node_based_edge_counter;
            const NodeID node_v = m_node_based_graph->GetTarget(e1);
            // if an only_-restriction emanates from (u,v), this is the single
            // node the turn is allowed to continue to
            const NodeID only_restriction_to_node =
                m_restriction_map->CheckForEmanatingIsOnlyTurn(node_u, node_v);
            const bool is_barrier_node = m_barrier_nodes.find(node_v) != m_barrier_nodes.end();

            for (const EdgeID e2 : m_node_based_graph->GetAdjacentEdgeRange(node_v))
            {
                if (!m_node_based_graph->GetEdgeData(e2).forward)
                {
                    continue;
                }
                const NodeID node_w = m_node_based_graph->GetTarget(e2);

                if ((only_restriction_to_node != SPECIAL_NODEID) &&
                    (node_w != only_restriction_to_node))
                {
                    // We are at an only_-restriction but not at the right turn.
                    ++restricted_turns_counter;
                    continue;
                }

                // barrier nodes only allow turning around; elsewhere skip
                // U-turns unless v is a dead end (out-degree 1)
                if (is_barrier_node)
                {
                    if (node_u != node_w)
                    {
                        ++skipped_barrier_turns_counter;
                        continue;
                    }
                }
                else
                {
                    if ((node_u == node_w) && (m_node_based_graph->GetOutDegree(node_v) > 1))
                    {
                        ++skipped_uturns_counter;
                        continue;
                    }
                }

                // only add an edge if turn is not a U-turn except when it is
                // at the end of a dead-end street
                // NOTE(review): the last conjunct is always true whenever the
                // second one holds (node_w can never equal SPECIAL_NODEID here)
                // — looks redundant; confirm intent before simplifying.
                if (m_restriction_map->CheckIfTurnIsRestricted(node_u, node_v, node_w) &&
                    (only_restriction_to_node == SPECIAL_NODEID) &&
                    (node_w != only_restriction_to_node))
                {
                    // We are at an only_-restriction but not at the right turn.
                    ++restricted_turns_counter;
                    continue;
                }

                // only add an edge if turn is not prohibited
                const EdgeData &edge_data1 = m_node_based_graph->GetEdgeData(e1);
                const EdgeData &edge_data2 = m_node_based_graph->GetEdgeData(e2);

                BOOST_ASSERT(edge_data1.edgeBasedNodeID != edge_data2.edgeBasedNodeID);
                BOOST_ASSERT(edge_data1.forward);
                BOOST_ASSERT(edge_data2.forward);

                // the following is the core of the loop.
                unsigned distance = edge_data1.distance;
                if (m_traffic_lights.find(node_v) != m_traffic_lights.end())
                {
                    distance += speed_profile.traffic_signal_penalty;
                }

                // unpack last node of first segment if packed
                const auto first_coordinate =
                    m_node_info_list[(m_geometry_compressor.HasEntryForID(e1)
                                          ? m_geometry_compressor.GetLastNodeIDOfBucket(e1)
                                          : node_u)];

                // unpack first node of second segment if packed
                const auto third_coordinate =
                    m_node_info_list[(m_geometry_compressor.HasEntryForID(e2)
                                          ? m_geometry_compressor.GetFirstNodeIDOfBucket(e2)
                                          : node_w)];

                const double turn_angle = ComputeAngle::OfThreeFixedPointCoordinates(
                    first_coordinate, m_node_info_list[node_v], third_coordinate);

                const int turn_penalty = GetTurnPenalty(turn_angle, lua_state);
                TurnInstruction turn_instruction = AnalyzeTurn(node_u, node_v, node_w, turn_angle);
                if (turn_instruction == TurnInstruction::UTurn)
                {
                    distance += speed_profile.u_turn_penalty;
                }
                distance += turn_penalty;

                const bool edge_is_compressed = m_geometry_compressor.HasEntryForID(e1);

                if (edge_is_compressed)
                {
                    ++compressed;
                }

                original_edge_data_vector.emplace_back(
                    (edge_is_compressed ? m_geometry_compressor.GetPositionForID(e1) : node_v),
                    edge_data1.nameID, turn_instruction, edge_is_compressed,
                    edge_data2.travel_mode);

                ++original_edges_counter;

                // flush buffered records every ~10M entries to bound memory
                if (original_edge_data_vector.size() > 1024 * 1024 * 10)
                {
                    FlushVectorToStream(edge_data_file, original_edge_data_vector);
                }

                BOOST_ASSERT(SPECIAL_NODEID != edge_data1.edgeBasedNodeID);
                BOOST_ASSERT(SPECIAL_NODEID != edge_data2.edgeBasedNodeID);

                m_edge_based_edge_list.emplace_back(
                    EdgeBasedEdge(edge_data1.edgeBasedNodeID, edge_data2.edgeBasedNodeID,
                                  m_edge_based_edge_list.size(), distance, true, false));
            }
        }
    }
    FlushVectorToStream(edge_data_file, original_edge_data_vector);

    // patch the record count over the dummy value written at the start
    // NOTE(review): seekp(std::ios::beg) passes a seekdir constant as an
    // absolute position; it works because std::ios::beg is 0 — seekp(0,
    // std::ios::beg) would state the intent. Verify before changing.
    edge_data_file.seekp(std::ios::beg);
    edge_data_file.write((char *)&original_edges_counter, sizeof(unsigned));
    edge_data_file.close();

    SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size() << " edge based nodes";
    SimpleLogger().Write() << "Node-based graph contains " << node_based_edge_counter << " edges";
    SimpleLogger().Write() << "Edge-expanded graph ...";
    SimpleLogger().Write() << "  contains " << m_edge_based_edge_list.size() << " edges";
    SimpleLogger().Write() << "  skips " << restricted_turns_counter << " turns, "
                                                                       "defined by "
                           << m_restriction_map->size() << " restrictions";
    SimpleLogger().Write() << "  skips " << skipped_uturns_counter << " U turns";
    SimpleLogger().Write() << "  skips " << skipped_barrier_turns_counter << " turns over barriers";
}
/// Asynchronous client wrapper: launches `recompute` on the remote component
/// addressed by gid_ and returns the future. Requires a valid gid_.
lcos::future<void> recompute_async(std::vector<hpx::naming::id_type> const& search_objects)
{
    BOOST_ASSERT(gid_);
    return this->base_type::recompute_async(gid_, search_objects);
}
/// Synchronous client wrapper: forwards `adjust` to the remote component
/// addressed by gid_. Requires a valid gid_.
void adjust(double dt)
{
    BOOST_ASSERT(gid_);
    this->base_type::adjust(gid_,dt);
}
/// Synchronous client wrapper: runs `search` on the remote component
/// addressed by gid_ and returns its result. Requires a valid gid_.
int search(std::vector<hpx::naming::id_type> const& search_objects)
{
    BOOST_ASSERT(gid_);
    return this->base_type::search(gid_, search_objects);
}
/// Synchronous client wrapper: forwards `enforce` to the remote component
/// addressed by gid_. Requires a valid gid_.
void enforce(std::vector<hpx::naming::id_type> const& master_gids,double dt,
             std::size_t n,std::size_t N)
{
    BOOST_ASSERT(gid_);
    this->base_type::enforce(gid_,master_gids,dt,n,N);
}
/// Asynchronous client wrapper: fetches the polygon from the remote component
/// addressed by gid_ and returns the future. Requires a valid gid_.
lcos::future<polygon_type> get_poly_async() const
{
    BOOST_ASSERT(gid_);
    return this->base_type::get_poly_async(gid_);
}
/// Asynchronous client wrapper: fetches Y from the remote component
/// addressed by gid_ and returns the future. Requires a valid gid_.
lcos::future<double> get_Y_async() const
{
    BOOST_ASSERT(gid_);
    return this->base_type::get_Y_async(gid_);
}
// LRU-style cache lookup/insert. Returns the cached Object for key `k`,
// creating and inserting it on a miss. The list `s_data.cont` holds entries in
// least- to most-recently-used order; `s_data.index` maps keys to list
// positions. On overflow, entries held only by the cache are evicted from the
// front. NOTE(review): `s_data` is a function-local static shared by all
// callers — looks unsynchronized here; confirm external locking.
network_boost::shared_ptr<Object const> object_cache<Key, Object>::do_get(const Key& k, size_type l_max_cache_size)
{
   typedef typename object_cache<Key, Object>::data object_data;
   typedef typename map_type::size_type map_size_type;
   static object_data s_data;

   //
   // see if the object is already in the cache:
   //
   map_iterator mpos = s_data.index.find(k);
   if(mpos != s_data.index.end())
   {
      //
      // Eureka!
      // We have a cached item, bump it up the list and return it:
      //
      if(--(s_data.cont.end()) != mpos->second)
      {
         // splice out the item we want to move:
         list_type temp;
         temp.splice(temp.end(), s_data.cont, mpos->second);
         // and now place it at the end of the list:
         s_data.cont.splice(s_data.cont.end(), temp, temp.begin());
         BOOST_ASSERT(*(s_data.cont.back().second) == k);
         // update index with new position:
         mpos->second = --(s_data.cont.end());
         BOOST_ASSERT(&(mpos->first) == mpos->second->second);
         BOOST_ASSERT(&(mpos->first) == s_data.cont.back().second);
      }
      return s_data.cont.back().first;
   }
   //
   // if we get here then the item is not in the cache,
   // so create it:
   //
   network_boost::shared_ptr<Object const> result(new Object(k));
   //
   // Add it to the list, and index it:
   //
   s_data.cont.push_back(value_type(result, static_cast<Key const*>(0)));
   s_data.index.insert(std::make_pair(k, --(s_data.cont.end())));
   // back-pointer from the list entry to the key stored in the index
   s_data.cont.back().second = &(s_data.index.find(k)->first);
   map_size_type s = s_data.index.size();
   BOOST_ASSERT(s_data.index[k]->first.get() == result.get());
   BOOST_ASSERT(&(s_data.index.find(k)->first) == s_data.cont.back().second);
   BOOST_ASSERT(s_data.index.find(k)->first == k);
   if(s > l_max_cache_size)
   {
      //
      // We have too many items in the list, so we need to start
      // popping them off the back of the list, but only if they're
      // being held uniquely by us:
      //
      list_iterator pos = s_data.cont.begin();
      list_iterator last = s_data.cont.end();
      while((pos != last) && (s > l_max_cache_size))
      {
         if(pos->first.unique())
         {
            // advance before erasing so `pos` stays valid
            list_iterator condemmed(pos);
            ++pos;
            // now remove the items from our containers,
            // then order has to be as follows:
            BOOST_ASSERT(s_data.index.find(*(condemmed->second)) != s_data.index.end());
            s_data.index.erase(*(condemmed->second));
            s_data.cont.erase(condemmed);
            --s;
         }
         else
            ++pos;
      }
      BOOST_ASSERT(s_data.index[k]->first.get() == result.get());
      BOOST_ASSERT(&(s_data.index.find(k)->first) == s_data.cont.back().second);
      BOOST_ASSERT(s_data.index.find(k)->first == k);
   }
   return result;
}
/// Synchronous client wrapper: fetches Y from the remote component
/// addressed by gid_. Requires a valid gid_.
double get_Y() const
{
    BOOST_ASSERT(gid_);
    return this->base_type::get_Y(gid_);
}
/// ASCII-only isxdigit replacement: nonzero iff `ch` carries the hex-digit
/// class bit in the static classification table. `ch` must be 7-bit ASCII.
static int isxdigit(int ch)
{
    BOOST_ASSERT(isascii_(ch));
    return ascii_char_types[ch] & BOOST_CC_XDIGIT;
}
/// Asynchronous client wrapper: sets Y on the remote component addressed by
/// gid_ and returns the future. Requires a valid gid_.
lcos::future<void> set_Y_async(double y)
{
    BOOST_ASSERT(gid_);
    return this->base_type::set_Y_async(gid_, y);
}
/// ASCII-only islower replacement: nonzero iff `ch` carries the lower-case
/// class bit in the static classification table. `ch` must be 7-bit ASCII.
static int islower(int ch)
{
    BOOST_ASSERT(isascii_(ch));
    return ascii_char_types[ch] & BOOST_CC_LOWER;
}
/// Synchronous client wrapper: sets X on the remote component addressed by
/// gid_. Requires a valid gid_.
void set_X(double x)
{
    BOOST_ASSERT(gid_);
    this->base_type::set_X(gid_, x);
}
/// ASCII-only isspace replacement: nonzero iff `ch` carries the whitespace
/// class bit in the static classification table. `ch` must be 7-bit ASCII.
static int isspace(int ch)
{
    BOOST_ASSERT(isascii_(ch));
    return ascii_char_types[ch] & BOOST_CC_SPACE;
}
/// Synchronous client wrapper: sets Y on the remote component addressed by
/// gid_. Requires a valid gid_.
void set_Y(double y)
{
    BOOST_ASSERT(gid_);
    this->base_type::set_Y(gid_, y);
}
/// ASCII-only toupper replacement: maps 'a'..'z' to 'A'..'Z' and passes every
/// other character through unchanged. `ch` must be 7-bit ASCII.
static int toupper(int ch)
{
    BOOST_ASSERT(isascii_(ch));
    if (islower(ch))
    {
        return ch - 'a' + 'A';
    }
    return ch;
}
// Contracts plain degree-2 vertices of the node-based graph: for every
// eligible node v on a u--v--w chain, the v--w edges are folded into the
// u--v edges (weights summed, targets retargeted to w resp. u), turn
// restrictions are fixed up, and the removed geometry is recorded in the
// geometry compressor. Barrier nodes, restriction via-nodes, overlapping
// ways, and incompatible edge pairs are left untouched.
void EdgeBasedGraphFactory::CompressGeometry()
{
    SimpleLogger().Write() << "Removing graph geometry while preserving topology";

    const unsigned original_number_of_nodes = m_node_based_graph->GetNumberOfNodes();
    const unsigned original_number_of_edges = m_node_based_graph->GetNumberOfEdges();

    Percent progress(original_number_of_nodes);

    for (const NodeID node_v : osrm::irange(0u, original_number_of_nodes))
    {
        progress.printStatus(node_v);

        // only contract degree 2 vertices
        if (2 != m_node_based_graph->GetOutDegree(node_v))
        {
            continue;
        }

        // don't contract barrier node
        if (m_barrier_nodes.end() != m_barrier_nodes.find(node_v))
        {
            continue;
        }

        // check if v is a via node for a turn restriction, i.e. a 'directed' barrier node
        if (m_restriction_map->IsViaNode(node_v))
        {
            continue;
        }

        /*
         *    reverse_e2   forward_e2
         * u <---------- v -----------> w
         *    ---------->   <-----------
         *    forward_e1   reverse_e1
         *
         * Will be compressed to:
         *
         *    reverse_e1
         * u <---------- w
         *    ---------->
         *    forward_e1
         *
         * If the edges are compatible.
         *
         */
        // v has exactly two adjacent edges; pick forward_e2 as the forward one
        // (the first edge may be the reverse one, hence the boolean offset)
        const bool reverse_edge_order =
            !(m_node_based_graph->GetEdgeData(m_node_based_graph->BeginEdges(node_v)).forward);
        const EdgeID forward_e2 = m_node_based_graph->BeginEdges(node_v) + reverse_edge_order;
        BOOST_ASSERT(SPECIAL_EDGEID != forward_e2);
        BOOST_ASSERT(forward_e2 >= m_node_based_graph->BeginEdges(node_v) &&
                     forward_e2 < m_node_based_graph->EndEdges(node_v));
        const EdgeID reverse_e2 = m_node_based_graph->BeginEdges(node_v) + 1 - reverse_edge_order;
        BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2);
        BOOST_ASSERT(reverse_e2 >= m_node_based_graph->BeginEdges(node_v) &&
                     reverse_e2 < m_node_based_graph->EndEdges(node_v));

        const EdgeData &fwd_edge_data2 = m_node_based_graph->GetEdgeData(forward_e2);
        const EdgeData &rev_edge_data2 = m_node_based_graph->GetEdgeData(reverse_e2);

        const NodeID node_w = m_node_based_graph->GetTarget(forward_e2);
        BOOST_ASSERT(SPECIAL_NODEID != node_w);
        BOOST_ASSERT(node_v != node_w);
        const NodeID node_u = m_node_based_graph->GetTarget(reverse_e2);
        BOOST_ASSERT(SPECIAL_NODEID != node_u);
        BOOST_ASSERT(node_u != node_v);

        const EdgeID forward_e1 = m_node_based_graph->FindEdge(node_u, node_v);
        BOOST_ASSERT(SPECIAL_EDGEID != forward_e1);
        BOOST_ASSERT(node_v == m_node_based_graph->GetTarget(forward_e1));
        const EdgeID reverse_e1 = m_node_based_graph->FindEdge(node_w, node_v);
        BOOST_ASSERT(SPECIAL_EDGEID != reverse_e1);
        BOOST_ASSERT(node_v == m_node_based_graph->GetTarget(reverse_e1));

        const EdgeData &fwd_edge_data1 = m_node_based_graph->GetEdgeData(forward_e1);
        const EdgeData &rev_edge_data1 = m_node_based_graph->GetEdgeData(reverse_e1);

        // skip if u and w are already directly connected
        if (m_node_based_graph->FindEdgeInEitherDirection(node_u, node_w) != SPECIAL_EDGEID)
        {
            continue;
        }

        // this case can happen if two ways with different names overlap
        if (fwd_edge_data1.nameID != rev_edge_data1.nameID ||
            fwd_edge_data2.nameID != rev_edge_data2.nameID)
        {
            continue;
        }

        if (fwd_edge_data1.IsCompatibleTo(fwd_edge_data2) &&
            rev_edge_data1.IsCompatibleTo(rev_edge_data2))
        {
            BOOST_ASSERT(m_node_based_graph->GetEdgeData(forward_e1).nameID ==
                         m_node_based_graph->GetEdgeData(reverse_e1).nameID);
            BOOST_ASSERT(m_node_based_graph->GetEdgeData(forward_e2).nameID ==
                         m_node_based_graph->GetEdgeData(reverse_e2).nameID);

            // Get distances before graph is modified
            const int forward_weight1 = m_node_based_graph->GetEdgeData(forward_e1).distance;
            const int forward_weight2 = m_node_based_graph->GetEdgeData(forward_e2).distance;

            BOOST_ASSERT(0 != forward_weight1);
            BOOST_ASSERT(0 != forward_weight2);

            const int reverse_weight1 = m_node_based_graph->GetEdgeData(reverse_e1).distance;
            const int reverse_weight2 = m_node_based_graph->GetEdgeData(reverse_e2).distance;

            BOOST_ASSERT(0 != reverse_weight1);
            BOOST_ASSERT(0 != reverse_weight2);

            const bool has_node_penalty = m_traffic_lights.find(node_v) != m_traffic_lights.end();

            // add weight of e2's to e1
            m_node_based_graph->GetEdgeData(forward_e1).distance += fwd_edge_data2.distance;
            m_node_based_graph->GetEdgeData(reverse_e1).distance += rev_edge_data2.distance;
            if (has_node_penalty)
            {
                // the traffic signal at v stays priced in after v disappears
                m_node_based_graph->GetEdgeData(forward_e1).distance +=
                    speed_profile.traffic_signal_penalty;
                m_node_based_graph->GetEdgeData(reverse_e1).distance +=
                    speed_profile.traffic_signal_penalty;
            }

            // extend e1's to targets of e2's
            m_node_based_graph->SetTarget(forward_e1, node_w);
            m_node_based_graph->SetTarget(reverse_e1, node_u);

            // remove e2's (if bidir, otherwise only one)
            m_node_based_graph->DeleteEdge(node_v, forward_e2);
            m_node_based_graph->DeleteEdge(node_v, reverse_e2);

            // update any involved turn restrictions
            m_restriction_map->FixupStartingTurnRestriction(node_u, node_v, node_w);
            m_restriction_map->FixupArrivingTurnRestriction(node_u, node_v, node_w,
                                                            *m_node_based_graph);

            m_restriction_map->FixupStartingTurnRestriction(node_w, node_v, node_u);
            m_restriction_map->FixupArrivingTurnRestriction(node_w, node_v, node_u,
                                                            *m_node_based_graph);

            // store compressed geometry in container
            m_geometry_compressor.CompressEdge(
                forward_e1, forward_e2, node_v, node_w,
                forward_weight1 + (has_node_penalty ? speed_profile.traffic_signal_penalty : 0),
                forward_weight2);
            m_geometry_compressor.CompressEdge(
                reverse_e1, reverse_e2, node_v, node_u, reverse_weight1,
                reverse_weight2 + (has_node_penalty ? speed_profile.traffic_signal_penalty : 0));
            ++removed_node_count;
        }
    }
    SimpleLogger().Write() << "removed " << removed_node_count << " nodes";
    m_geometry_compressor.PrintStatistics();

    // report compression ratios over the surviving (non-isolated) nodes/edges
    unsigned new_node_count = 0;
    unsigned new_edge_count = 0;
    for (const auto i : osrm::irange(0u, m_node_based_graph->GetNumberOfNodes()))
    {
        if (m_node_based_graph->GetOutDegree(i) > 0)
        {
            ++new_node_count;
            new_edge_count += (m_node_based_graph->EndEdges(i) - m_node_based_graph->BeginEdges(i));
        }
    }
    SimpleLogger().Write() << "new nodes: " << new_node_count << ", edges " << new_edge_count;
    SimpleLogger().Write() << "Node compression ratio: "
                           << new_node_count / (double)original_number_of_nodes;
    SimpleLogger().Write() << "Edge compression ratio: "
                           << new_edge_count / (double)original_number_of_edges;
}