/**
 * Map the paths generated by the MCF solver into flows associated with nodes.
 * @param job Link graph job whose paths are to be mapped.
 */
void FlowMapper::Run(LinkGraphJob &job) const
{
	for (NodeID node_id = 0; node_id < job.Size(); ++node_id) {
		Node prev_node = job[node_id];
		StationID prev = prev_node.Station();
		PathList &paths = prev_node.Paths();
		for (PathList::iterator i = paths.begin(); i != paths.end(); ++i) {
			Path *path = *i;
			uint flow = path->GetFlow();
			if (flow == 0) break;
			Node node = job[path->GetNode()];
			StationID via = node.Station();
			StationID origin = job[path->GetOrigin()].Station();
			assert(prev != via && via != origin);
			/* Mark all of the flow for local consumption at "via". */
			node.Flows().AddFlow(origin, via, flow);
			if (prev != origin) {
				/* Pass some of the flow marked for local consumption at
				 * "prev" on to this node. */
				prev_node.Flows().PassOnFlow(origin, via, flow);
			} else {
				/* Prev node is the origin. Simply add the flow. */
				prev_node.Flows().AddFlow(origin, via, flow);
			}
		}
	}

	for (NodeID node_id = 0; node_id < job.Size(); ++node_id) {
		/* Remove local consumption shares marked as invalid. */
		Node node = job[node_id];
		FlowStatMap &flows = node.Flows();
		flows.FinalizeLocalConsumption(node.Station());
		if (this->scale) {
			/* Scale by the time the graph has been running without being compressed. */
			uint runtime = job.JoinDate() - job.Settings().recalc_time - job.LastCompression();
			for (FlowStatMap::iterator i = flows.begin(); i != flows.end(); ++i) {
				i->second.ScaleToMonthly(runtime);
			}
		}
		/* Clear paths. */
		PathList &paths = node.Paths();
		for (PathList::iterator i = paths.begin(); i != paths.end(); ++i) {
			delete *i;
		}
		paths.clear();
	}
}
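/*
 * A minimal, self-contained sketch of what the mapping above does for a
 * single path O -> A -> B: every node on the path first books the flow for
 * local consumption at its own station ("via"), and PassOnFlow then converts
 * the intermediate node's local share into a share towards the next hop.
 * MiniFlows and all values are hypothetical; this is not the real
 * FlowStatMap/FlowStat API.
 */
#include <cstdio>
#include <map>

typedef unsigned int StationID;

struct MiniFlows {
	StationID self;                                              ///< Station this table belongs to.
	std::map<StationID, std::map<StationID, unsigned> > shares;  ///< shares[origin][via]; via == self means "consumed here".

	explicit MiniFlows(StationID self) : self(self) {}

	/** Record new flow; with via == self it counts as local consumption. */
	void AddFlow(StationID origin, StationID via, unsigned flow)
	{
		this->shares[origin][via] += flow;
	}

	/** Move previously booked local consumption on towards the next hop. */
	void PassOnFlow(StationID origin, StationID via, unsigned flow)
	{
		this->shares[origin][this->self] -= flow;
		this->shares[origin][via] += flow;
	}
};

int main()
{
	const StationID O = 1, A = 2, B = 3;
	MiniFlows flows_o(O), flows_a(A), flows_b(B);

	/* Segment O -> A: A books the flow as local consumption for now;
	 * O is the origin, so it simply records the flow towards A. */
	flows_a.AddFlow(O, A, 10);
	flows_o.AddFlow(O, A, 10);

	/* Segment A -> B: B books the flow as local consumption; A passes its
	 * local share on towards B. */
	flows_b.AddFlow(O, B, 10);
	flows_a.PassOnFlow(O, B, 10);

	printf("A keeps %u locally and forwards %u via B\n",
			flows_a.shares[O][A], flows_a.shares[O][B]); /* prints 0 and 10 */
	return 0;
}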
/**
 * Constructor.
 * @param job Link graph job to work with.
 */
FlowEdgeIterator(LinkGraphJob &job) : job(job)
{
	for (NodeID i = 0; i < job.Size(); ++i) {
		StationID st = job[i].Station();
		if (st >= this->station_to_node.size()) {
			this->station_to_node.resize(st + 1);
		}
		this->station_to_node[st] = i;
	}
}
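/*
 * The constructor above builds a reverse lookup from (sparse) station IDs to
 * (dense) node IDs, growing the vector whenever a larger station ID shows up.
 * A small stand-alone sketch of that grow-on-demand pattern follows; the IDs
 * used here are made up, and unused slots keep their default value, so real
 * code also needs a sentinel or the guarantee that only existing stations are
 * ever looked up.
 */
#include <cstdio>
#include <vector>

typedef unsigned int StationID;
typedef unsigned int NodeID;

int main()
{
	std::vector<NodeID> station_to_node;
	const StationID stations[] = {7, 2, 42}; /* stations of nodes 0, 1 and 2 */

	for (NodeID i = 0; i < 3; ++i) {
		StationID st = stations[i];
		if (st >= station_to_node.size()) station_to_node.resize(st + 1);
		station_to_node[st] = i;
	}

	printf("station 42 maps to node %u\n", station_to_node[42]); /* node 2 */
	return 0;
}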
/**
 * Run the second pass of the MCF calculation which assigns all remaining
 * demands to existing paths.
 * @param job Link graph job to calculate.
 */
MCF2ndPass::MCF2ndPass(LinkGraphJob &job) : MultiCommodityFlow(job)
{
	this->max_saturation = UINT_MAX; // disable the artificial cap on saturation
	PathVector paths;
	uint size = job.Size();
	uint accuracy = job.Settings().accuracy;
	bool demand_left = true;
	while (demand_left) {
		demand_left = false;
		for (NodeID source = 0; source < size; ++source) {
			this->Dijkstra<CapacityAnnotation, FlowEdgeIterator>(source, paths);
			for (NodeID dest = 0; dest < size; ++dest) {
				Edge edge = this->job[source][dest];
				Path *path = paths[dest];
				if (edge.UnsatisfiedDemand() > 0 && path->GetFreeCapacity() > INT_MIN) {
					this->PushFlow(edge, path, accuracy, UINT_MAX);
					if (edge.UnsatisfiedDemand() > 0) demand_left = true;
				}
			}
			this->CleanupPaths(source, paths);
		}
	}
}
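/*
 * The second pass ranks paths by remaining capacity rather than by distance
 * and pushes the leftover demand with the saturation cap disabled. The sketch
 * below only illustrates the ordering such a capacity annotation implies:
 * prefer the larger bottleneck capacity and fall back to distance on ties.
 * It is a simplified restatement with made-up names and numbers, not the
 * actual CapacityAnnotation implementation.
 */
#include <cstdio>

struct CandidatePath {
	int free_capacity;  ///< Bottleneck free capacity; can go negative when oversaturated.
	unsigned distance;  ///< Total distance of the path.
};

/** Return true if a is the better choice under a capacity-first ordering. */
static bool IsBetter(const CandidatePath &a, const CandidatePath &b)
{
	if (a.free_capacity != b.free_capacity) return a.free_capacity > b.free_capacity;
	return a.distance < b.distance;
}

int main()
{
	CandidatePath direct = {5, 100};   /* short, but nearly saturated */
	CandidatePath detour = {40, 300};  /* longer, but with plenty of capacity left */

	const CandidatePath &chosen = IsBetter(detour, direct) ? detour : direct;
	printf("the capacity-first ordering prefers the path with %d free capacity\n",
			chosen.free_capacity);
	return 0;
}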
/**
 * Run the first pass of the MCF calculation.
 * @param job Link graph job to calculate.
 */
MCF1stPass::MCF1stPass(LinkGraphJob &job) : MultiCommodityFlow(job)
{
	PathVector paths;
	uint size = job.Size();
	uint accuracy = job.Settings().accuracy;
	bool more_loops;

	do {
		more_loops = false;
		for (NodeID source = 0; source < size; ++source) {
			/* First saturate the shortest paths. */
			this->Dijkstra<DistanceAnnotation, GraphEdgeIterator>(source, paths);

			for (NodeID dest = 0; dest < size; ++dest) {
				Edge edge = job[source][dest];
				if (edge.UnsatisfiedDemand() > 0) {
					Path *path = paths[dest];
					assert(path != NULL);
					/* Generally only allow paths that don't exceed the
					 * available capacity. But if no demand has been assigned
					 * yet, make an exception and allow any valid path *once*. */
					if (path->GetFreeCapacity() > 0 && this->PushFlow(edge, path, accuracy, this->max_saturation) > 0) {
						/* If a path has been found there is a chance we can
						 * find more. */
						more_loops = more_loops || (edge.UnsatisfiedDemand() > 0);
					} else if (edge.UnsatisfiedDemand() == edge.Demand() && path->GetFreeCapacity() > INT_MIN) {
						this->PushFlow(edge, path, accuracy, UINT_MAX);
					}
				}
			}
			this->CleanupPaths(source, paths);
		}
	} while (more_loops || this->EliminateCycles());
}
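/*
 * A simplified restatement of the per-edge decision in the first pass: stay
 * within the saturation cap while the shortest path still has free capacity,
 * but allow a single uncapped push for a demand that has had nothing assigned
 * yet, so every demand ends up with at least one path. Names and numbers are
 * illustrative only and do not come from the OpenTTD sources.
 */
#include <cstdio>

struct DemandEdge {
	unsigned demand;       ///< Total demand on this edge.
	unsigned unsatisfied;  ///< Demand not yet assigned to any path.
};

static const char *Decide(const DemandEdge &edge, int free_capacity)
{
	if (free_capacity > 0) return "push, respecting the saturation cap";
	if (edge.unsatisfied == edge.demand) return "push once without a cap";
	return "leave for later";
}

int main()
{
	DemandEdge untouched = {30, 30};  /* nothing assigned yet */
	DemandEdge partly = {30, 10};     /* already partly routed */

	printf("saturated path, untouched demand: %s\n", Decide(untouched, 0));
	printf("saturated path, partly routed demand: %s\n", Decide(partly, 0));
	return 0;
}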
/**
 * Constructor.
 * @param job Link graph job to work with.
 */
FlowEdgeIterator(LinkGraphJob &job) : job(job)
{
	for (NodeID i = 0; i < job.Size(); ++i) {
		/* Assumes station_to_node can already be indexed by every station ID
		 * occurring in the job. */
		this->station_to_node[job[i].Station()] = i;
	}
}
/**
 * Do the actual demand calculation.
 * @param job Link graph job to calculate the demands for.
 * @param scaler Scaler to be used for scaling the demands.
 */
template<class Tscaler>
void DemandCalculator::CalcDemand(LinkGraphJob &job, Tscaler scaler)
{
	NodeList supplies;
	NodeList demands;
	uint num_supplies = 0;
	uint num_demands = 0;

	for (NodeID node = 0; node < job.Size(); node++) {
		scaler.AddNode(job[node]);
		if (job[node].Supply() > 0) {
			supplies.push_back(node);
			num_supplies++;
		}
		if (job[node].Demand() > 0) {
			demands.push_back(node);
			num_demands++;
		}
	}

	if (num_supplies == 0 || num_demands == 0) return;

	/* Mean acceptance attributed to each node. If the distribution is
	 * symmetric this is relative to remote supply, otherwise it is
	 * relative to remote demand. */
	scaler.SetDemandPerNode(num_demands);
	uint chance = 0;

	while (!supplies.empty() && !demands.empty()) {
		NodeID from_id = supplies.front();
		supplies.pop_front();

		for (uint i = 0; i < num_demands; ++i) {
			assert(!demands.empty());
			NodeID to_id = demands.front();
			demands.pop_front();
			if (from_id == to_id) {
				/* Only one node with both supply and demand is left. */
				if (demands.empty() && supplies.empty()) return;

				demands.push_back(to_id);
				continue;
			}

			int32 supply = scaler.EffectiveSupply(job[from_id], job[to_id]);
			assert(supply > 0);

			/* Scale the distance by mod_dist around max_distance. */
			int32 distance = this->max_distance - (this->max_distance -
					(int32)job[from_id][to_id].Distance()) * this->mod_dist / 100;

			/* Scale the accuracy by distance around accuracy / 2. */
			int32 divisor = this->accuracy * (this->mod_dist - 50) / 100 +
					this->accuracy * distance / this->max_distance + 1;

			assert(divisor > 0);

			uint demand_forw = 0;
			if (divisor <= supply) {
				/* At first only distribute demand if
				 * effective supply / divisor >= 1.
				 * Others are too small or too far away to be considered. */
				demand_forw = supply / divisor;
			} else if (++chance > this->accuracy * num_demands * num_supplies) {
				/* After some trying, if there is still supply left, distribute
				 * demand to the remaining nodes as well. */
				demand_forw = 1;
			}

			demand_forw = min(demand_forw, job[from_id].UndeliveredSupply());

			scaler.SetDemands(job, from_id, to_id, demand_forw);

			if (scaler.HasDemandLeft(job[to_id])) {
				demands.push_back(to_id);
			} else {
				num_demands--;
			}

			if (job[from_id].UndeliveredSupply() == 0) break;
		}

		if (job[from_id].UndeliveredSupply() != 0) {
			supplies.push_back(from_id);
		} else {
			num_supplies--;
		}
	}
}
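/*
 * A worked example of the two scaling formulas above. With mod_dist at 100
 * the scaled distance equals the real distance, and the divisor grows with
 * it, so nearby destinations receive a larger share of the effective supply.
 * All parameter values are made up for illustration; only the arithmetic
 * matches the code above.
 */
#include <cstdio>
#include <cstdint>

int main()
{
	const int32_t max_distance = 1000;
	const int32_t mod_dist = 100;  /* full effect of distance */
	const int32_t accuracy = 16;
	const int32_t supply = 200;    /* effective supply between the two nodes */

	for (int32_t real_distance = 100; real_distance <= 900; real_distance += 400) {
		/* Scale the distance by mod_dist around max_distance. */
		int32_t distance = max_distance - (max_distance - real_distance) * mod_dist / 100;

		/* Scale the accuracy by distance around accuracy / 2. */
		int32_t divisor = accuracy * (mod_dist - 50) / 100 + accuracy * distance / max_distance + 1;

		/* Same rule as above: only distribute if the share is at least 1. */
		printf("distance %4d -> divisor %2d -> demand %d\n",
				(int)real_distance, (int)divisor, (int)(divisor <= supply ? supply / divisor : 0));
	}
	return 0;
}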