/* Cut cwnd and enter fast recovery mode upon triple dupack */ void TcpNewReno::DupAck (const TcpHeader& t, uint32_t count) { NS_LOG_FUNCTION (this << count); if (count == m_retxThresh && !m_inFastRec) { // triple duplicate ack triggers fast retransmit (RFC2582 sec.3 bullet #1) m_ssThresh = std::max (2 * m_segmentSize, BytesInFlight () / 2); m_cWnd = m_ssThresh + 3 * m_segmentSize; m_recover = m_highTxMark; m_inFastRec = true; NS_LOG_INFO ("Triple dupack. Enter fast recovery mode. Reset cwnd to " << m_cWnd << ", ssthresh to " << m_ssThresh << " at fast recovery seqnum " << m_recover); DoRetransmit (); } else if (m_inFastRec) { // Increase cwnd for every additional dupack (RFC2582, sec.3 bullet #3) m_cWnd += m_segmentSize; NS_LOG_INFO ("Dupack in fast recovery mode. Increase cwnd to " << m_cWnd); SendPendingData (m_connected); } else if (!m_inFastRec && m_limitedTx && m_txBuffer.SizeFromSequence (m_nextTxSequence) > 0) { // RFC3042 Limited transmit: Send a new packet for each duplicated ACK before fast retransmit NS_LOG_INFO ("Limited transmit"); uint32_t sz = SendDataPacket (m_nextTxSequence, m_segmentSize, true); m_nextTxSequence += sz; // Advance next tx sequence }; }
/**
 * Return the payload size (in bytes) of the first queued packet whose MAC
 * header type matches \p packetType.  When that packet is being fragmented,
 * only the bytes remaining after the current fragment offset are counted.
 *
 * \param packetType the MAC header type to look for
 * \return payload size in bytes (remaining fragment size if fragmenting)
 */
uint32_t
WimaxMacQueue::GetFirstPacketPayloadSize (MacHeaderType::HeaderType packetType)
{
  // Locate the first queued element with a matching header type.
  // NOTE(review): if nothing matches, 'element' holds the last entry examined
  // (or a default-constructed one when the queue is empty, whose m_packet is
  // not valid) -- confirm that callers guarantee a matching entry exists.
  QueueElement element;
  for (std::deque<QueueElement>::const_iterator iter = m_queue.begin (); iter != m_queue.end (); ++iter)
    {
      element = *iter;
      if (element.m_hdrType.GetType () == packetType)
        {
          break;
        }
    }
  NS_LOG_INFO ("\t\t GetFirstPacketPayloadSize ()");
  if (CheckForFragmentation (packetType))
    {
      // Fragmentation in progress: bytes before m_fragmentOffset were already sent.
      NS_LOG_INFO ("\t\t\t fullPayloadSize=" << element.m_packet->GetSize ()
                   << "\n\t\t\t fragmentOffset=" << element.m_fragmentOffset
                   << "\n\t\t\t (fragment)payloadSize="
                   << element.m_packet->GetSize () - element.m_fragmentOffset);
      return element.m_packet->GetSize () - element.m_fragmentOffset;
    }
  NS_LOG_INFO ("\t\t payloadSize=" << element.m_packet->GetSize ());
  return element.m_packet->GetSize ();
}
/**
 * Run one buildings-pathloss test case.
 *
 * Creates a single residential building occupying the negative-x half plane,
 * instantiates the two mobility models selected by the indices stored at
 * construction time, computes the HybridBuildings propagation loss with all
 * shadowing components disabled, and compares it against the precomputed
 * reference value m_lossRef with a 0.1 tolerance.
 */
void
BuildingsPathlossTestCase::DoRun (void)
{
  NS_LOG_FUNCTION (this);

  // the building basically occupies the negative x plane, so any node
  // in this area will fall in the building
  Ptr<Building> building1 = CreateObject<Building> ();
  building1->SetBoundaries (Box (-3000, -1, -4000, 4000.0, 0.0, 12));
  building1->SetBuildingType (Building::Residential);
  building1->SetExtWallsType (Building::ConcreteWithWindows);
  building1->SetNFloors (3);

  Ptr<MobilityModel> mma = CreateMobilityModel (m_mobilityModelIndex1);
  Ptr<MobilityModel> mmb = CreateMobilityModel (m_mobilityModelIndex2);

  Ptr<HybridBuildingsPropagationLossModel> propagationLossModel =
    CreateObject<HybridBuildingsPropagationLossModel> ();
  propagationLossModel->SetAttribute ("Frequency", DoubleValue (m_freq));
  propagationLossModel->SetAttribute ("Environment", EnumValue (m_env));
  propagationLossModel->SetAttribute ("CitySize", EnumValue (m_city));
  // cancel shadowing effect so the deterministic loss can be compared
  propagationLossModel->SetAttribute ("ShadowSigmaOutdoor", DoubleValue (0.0));
  propagationLossModel->SetAttribute ("ShadowSigmaIndoor", DoubleValue (0.0));
  propagationLossModel->SetAttribute ("ShadowSigmaExtWalls", DoubleValue (0.0));

  double loss = propagationLossModel->GetLoss (mma, mmb);

  NS_LOG_INFO ("Calculated loss: " << loss);
  NS_LOG_INFO ("Theoretical loss: " << m_lossRef);

  NS_TEST_ASSERT_MSG_EQ_TOL (loss, m_lossRef, 0.1, "Wrong loss !");
  Simulator::Destroy ();
}
/**
 * Decide whether the head packet of \p connection can be fragmented into the
 * remaining downlink allocation.
 *
 * \param connection      the connection to check (must be a transport connection)
 * \param availableSymbols symbols left in the current allocation
 * \param modulationType  modulation used to convert symbols to bytes
 * \return true when at least the generic MAC header fits, so a (first) fragment
 *         can be sent; false for non-transport connections or too-small allocations
 *
 * Fix: corrected the "connction" typo in the log message.
 */
bool
BSScheduler::CheckForFragmentation (Ptr<WimaxConnection> connection,
                                    int availableSymbols,
                                    WimaxPhy::ModulationType modulationType)
{
  NS_LOG_INFO ("BS Scheduler, CheckForFragmentation");
  if (connection->GetType () != Cid::TRANSPORT)
    {
      // Only transport connections carry fragmentable SDUs.
      NS_LOG_INFO ("\t No Transport connection, Fragmentation IS NOT possible");
      return false;
    }

  // Convert the leftover allocation to bytes for the current modulation.
  uint32_t availableByte = GetBs ()->GetPhy ()->GetNrBytes (availableSymbols, modulationType);
  uint32_t headerSize = connection->GetQueue ()->GetFirstPacketHdrSize (
      MacHeaderType::HEADER_TYPE_GENERIC);
  NS_LOG_INFO ("\t availableByte = " << availableByte << " headerSize = " << headerSize);

  if (availableByte > headerSize)
    {
      // There is room for the header plus at least one payload byte.
      NS_LOG_INFO ("\t Fragmentation IS possible");
      return true;
    }
  NS_LOG_INFO ("\t Fragmentation IS NOT possible");
  return false;
}
/**
 * Total number of bytes needed to transmit the first queued packet of the
 * given header type: its (possibly fragmented) payload plus all MAC headers.
 *
 * \param packetType the MAC header type to look for
 * \return payload size + header size, in bytes
 */
uint32_t
WimaxMacQueue::GetFirstPacketRequiredByte (MacHeaderType::HeaderType packetType)
{
  NS_LOG_INFO ("\t GetFirstPacketRequiredByte ()");

  // Compute the two contributions separately for readability.
  uint32_t payloadSize = GetFirstPacketPayloadSize (packetType);
  uint32_t headerSize = GetFirstPacketHdrSize (packetType);
  uint32_t requiredByte = payloadSize + headerSize;

  NS_LOG_INFO ("\t Required Bytes = " << requiredByte << std::endl);
  return requiredByte;
}
/**
 * Check whether the given IP protocol number matches one of the protocols
 * configured for this classifier record.
 *
 * \param proto the protocol field of the packet being classified
 * \return true if any configured protocol equals \p proto
 *
 * Bug fix: the log line printed 'proto' for both the packet and the
 * classifier value, so the "cls=" field never showed the rule being
 * compared; it now prints the classifier entry (*iter).
 */
bool
IpcsClassifierRecord::CheckMatchProtocol (uint8_t proto) const
{
  for (std::vector<uint8_t>::const_iterator iter = m_protocol.begin (); iter != m_protocol.end (); ++iter)
    {
      // Cast to uint16_t so the values print as numbers, not characters.
      NS_LOG_INFO ("proto check match: pkt=" << (uint16_t) proto << " cls=" << (uint16_t) (*iter));
      if (proto == (*iter))
        {
          return true;
        }
    }
  NS_LOG_INFO ("NOT OK!");
  return false;
}
/**
 * Check whether the packet's source address belongs to any of the
 * (address, mask) pairs configured for this classifier record.
 *
 * \param srcAddress source address of the packet being classified
 * \return true if the masked address equals a configured network address
 */
bool
IpcsClassifierRecord::CheckMatchSrcAddr (Ipv4Address srcAddress) const
{
  for (const struct ipv4Addr &entry : m_srcAddr)
    {
      NS_LOG_INFO ("src addr check match: pkt=" << srcAddress << " cls=" << entry.Address << "/" << entry.Mask);
      // The packet matches when it falls inside the configured subnet.
      if (srcAddress.CombineMask (entry.Mask) == entry.Address)
        {
          return true;
        }
    }
  NS_LOG_INFO ("NOT OK!");
  return false;
}
/**
 * Check whether the packet's destination port falls within any of the port
 * ranges configured for this classifier record.
 *
 * \param port destination port of the packet being classified
 * \return true if \p port lies inside a configured [PortLow, PortHigh] range
 */
bool
IpcsClassifierRecord::CheckMatchDstPort (uint16_t port) const
{
  for (const struct PortRange &range : m_dstPortRange)
    {
      NS_LOG_INFO ("dst port check match: pkt=" << port << " cls= [" << range.PortLow << " TO " << range.PortHigh << "]");
      // Ranges are inclusive on both ends.
      if (port >= range.PortLow && port <= range.PortHigh)
        {
          return true;
        }
    }
  NS_LOG_INFO ("NOT OK!");
  return false;
}
/**
 * Drain all pending packets from the ICMPv6 socket and log Echo Replies.
 *
 * \param socket the receiving socket
 *
 * Any ICMPv6 message other than an Echo Reply is silently discarded.
 */
void
Ping6::HandleRead (Ptr<Socket> socket)
{
  NS_LOG_FUNCTION (this << socket);

  Ptr<Packet> packet=0;
  Address from;
  while ((packet = socket->RecvFrom (from)))
    {
      if (Inet6SocketAddress::IsMatchingType (from))
        {
          Ipv6Header hdr;
          Icmpv6Echo reply (0);
          Inet6SocketAddress address = Inet6SocketAddress::ConvertFrom (from);

          // Strip the IPv6 header delivered by the raw socket ...
          packet->RemoveHeader (hdr);

          // ... then peek at the ICMPv6 type byte without consuming it, so the
          // full ICMPv6 header can still be removed in the matching case below.
          uint8_t type;
          packet->CopyData (&type, sizeof(type));

          switch (type)
            {
            case Icmpv6Header::ICMPV6_ECHO_REPLY:
              packet->RemoveHeader (reply);
              NS_LOG_INFO ("Received Echo Reply size = " << std::dec << packet->GetSize () <<
                           " bytes from " << address.GetIpv6 () <<
                           " id = " << (uint16_t)reply.GetId () <<
                           " seq = " << (uint16_t)reply.GetSeq ());
              break;
            default:
              /* other type, discard */
              break;
            }
        }
    }
}
void Face::putBead(const Bead& bead) { NS_LOG_INFO (">> Bead: " << bead.getName()); shared_ptr<const Bead> beadPtr; try { beadPtr = bead.shared_from_this(); } catch (const bad_weak_ptr& e) { NS_LOG_INFO("Face::put WARNING: the supplied Data should be created using make_shared<Data>()"); beadPtr = make_shared<Bead>(bead); } m_impl->m_scheduler.scheduleEvent(time::seconds(0), [=] { m_impl->asyncPutBead(beadPtr); }); }
void Face::putData(const Data& data) { NS_LOG_INFO (">> Data: " << data.getName()); shared_ptr<const Data> dataPtr; try { dataPtr = data.shared_from_this(); } catch (const bad_weak_ptr& e) { NS_LOG_INFO("Face::put WARNING: the supplied Data should be created using make_shared<Data>()"); dataPtr = make_shared<Data>(data); } m_impl->m_scheduler.scheduleEvent(time::seconds(0), [=] { m_impl->asyncPutData(dataPtr); }); }
/**
 * Scheduled callback fired when the propagation delay of the current packet
 * has elapsed: the channel leaves PROPAGATING and becomes IDLE so a new
 * transmission may start.
 */
void
CsmaChannel::PropagationCompleteEvent ()
{
  NS_LOG_FUNCTION (this << m_currentPkt);
  NS_LOG_INFO ("UID is " << m_currentPkt->GetUid () << ")");

  // This event is only ever scheduled from TransmitEnd, after the state
  // was switched to PROPAGATING.
  NS_ASSERT (m_state == PROPAGATING);
  m_state = IDLE;
}
/* New ACK (up to seqnum seq) received. Increase cwnd and call TcpSocketBase::NewAck() */ void TcpNewReno::NewAck (const SequenceNumber32& seq) { NS_LOG_FUNCTION (this << seq); NS_LOG_LOGIC ("TcpNewReno receieved ACK for seq " << seq << " cwnd " << m_cWnd << " ssthresh " << m_ssThresh); // Check for exit condition of fast recovery if (m_inFastRec && seq < m_recover) { // Partial ACK, partial window deflation (RFC2582 sec.3 bullet #5 paragraph 3) m_cWnd -= seq - m_txBuffer.HeadSequence (); m_cWnd += m_segmentSize; // increase cwnd NS_LOG_INFO ("Partial ACK in fast recovery: cwnd set to " << m_cWnd); TcpSocketBase::NewAck (seq); // update m_nextTxSequence and send new data if allowed by window DoRetransmit (); // Assume the next seq is lost. Retransmit lost packet return; } else if (m_inFastRec && seq >= m_recover) { // Full ACK (RFC2582 sec.3 bullet #5 paragraph 2, option 1) m_cWnd = std::min (m_ssThresh, BytesInFlight () + m_segmentSize); m_inFastRec = false; NS_LOG_INFO ("Received full ACK. Leaving fast recovery with cwnd set to " << m_cWnd); } // Increase of cwnd based on current phase (slow start or congestion avoidance) if (m_cWnd < m_ssThresh) { // Slow start mode, add one segSize to cWnd. Default m_ssThresh is 65535. (RFC2001, sec.1) m_cWnd += m_segmentSize; NS_LOG_INFO ("In SlowStart, updated to cwnd " << m_cWnd << " ssthresh " << m_ssThresh); } else { // Congestion avoidance mode, increase by (segSize*segSize)/cwnd. (RFC2581, sec.3.1) // To increase cwnd for one segSize per RTT, it should be (ackBytes*segSize)/cwnd double adder = static_cast<double> (m_segmentSize * m_segmentSize) / m_cWnd.Get (); adder = std::max (1.0, adder); m_cWnd += static_cast<uint32_t> (adder); NS_LOG_INFO ("In CongAvoid, updated to cwnd " << m_cWnd << " ssthresh " << m_ssThresh); } // Complete newAck processing TcpSocketBase::NewAck (seq); }
/**
 * Express an Interest through this face.
 *
 * A private copy of the Interest is made so its lifetime is independent of
 * the caller's object; the actual transmission happens asynchronously via
 * the scheduler.  The returned opaque id can be used to cancel the pending
 * Interest later.
 */
const PendingInterestId*
Face::expressInterest(const Interest& interest, const OnData& onData, const OnTimeout& onTimeout)
{
  NS_LOG_INFO (">> Interest: " << interest.getName());

  auto pendingInterest = make_shared<Interest>(interest);
  m_impl->m_scheduler.scheduleEvent(time::seconds(0), [=] {
    m_impl->asyncExpressInterest(pendingInterest, onData, onTimeout);
  });

  // The raw pointer doubles as an opaque handle for cancellation.
  return reinterpret_cast<const PendingInterestId*>(pendingInterest.get());
}
/**
 * Compute the total MAC header size (in bytes) of the first queued packet
 * whose header type matches \p packetType: generic MAC header (when
 * applicable) + header-type field + fragmentation subheader (when the packet
 * is being fragmented).
 *
 * \param packetType the MAC header type to look for
 * \return total header size in bytes
 */
uint32_t
WimaxMacQueue::GetFirstPacketHdrSize (MacHeaderType::HeaderType packetType)
{
  // Locate the first queued element with a matching header type.
  // NOTE(review): if nothing matches, the fields read below come from the
  // last examined (or default-constructed) element -- confirm that callers
  // guarantee a matching entry exists.
  QueueElement element;
  for (std::deque<QueueElement>::const_iterator iter = m_queue.begin (); iter != m_queue.end (); ++iter)
    {
      element = *iter;
      if (element.m_hdrType.GetType () == packetType)
        {
          break;
        }
    }
  NS_LOG_INFO ("\t\t GetFirstPacketHdrSize ()");

  uint32_t hdrSize = 0;
  if (element.m_hdrType.GetType () == MacHeaderType::HEADER_TYPE_GENERIC)
    {
      // Generic MAC header is only present for generic-type packets.
      hdrSize += element.m_hdr.GetSerializedSize ();
      NS_LOG_INFO ("\t\t\t m_hdr.GetSerializedSize=" << element.m_hdr.GetSerializedSize ());
    }
  hdrSize += element.m_hdrType.GetSerializedSize ();
  NS_LOG_INFO ("\t\t\t m_hdrType.GetSerializedSize=" << element.m_hdrType.GetSerializedSize ());

  if (CheckForFragmentation (packetType))
    {
      // A 2-byte fragmentation subheader is added when fragmenting.
      NS_LOG_INFO ("\t\t\t fragSubhdrSize=2");
      hdrSize += 2;
    }
  NS_LOG_INFO ("\t\t hdrSize=" << hdrSize);
  return hdrSize;
}
/**
 * Register an Interest filter with its callback on this face.
 *
 * The filter and callback are bundled into a record whose ownership is
 * transferred to the deferred scheduler event; the returned opaque id can be
 * used to unregister the filter later.
 */
const InterestFilterId*
Face::setInterestFilter(const InterestFilter& interestFilter, const OnInterest& onInterest)
{
  NS_LOG_INFO("Set Interest Filter << " << interestFilter);

  auto record = make_shared<InterestFilterRecord>(interestFilter, onInterest);
  m_impl->m_scheduler.scheduleEvent(time::seconds(0),
                                    [=] { m_impl->asyncSetInterestFilter(record); });

  // The raw pointer doubles as an opaque handle for unsetInterestFilter.
  return reinterpret_cast<const InterestFilterId*>(record.get());
}
void Consumer::SendPacket() { if (!m_active) return; NS_LOG_FUNCTION_NOARGS(); uint32_t seq = std::numeric_limits<uint32_t>::max(); // invalid while (m_retxSeqs.size()) { seq = *m_retxSeqs.begin(); m_retxSeqs.erase(m_retxSeqs.begin()); break; } if (seq == std::numeric_limits<uint32_t>::max()) { if (m_seqMax != std::numeric_limits<uint32_t>::max()) { if (m_seq >= m_seqMax) { return; // we are totally done } } seq = m_seq++; } // shared_ptr<Name> nameWithSequence = make_shared<Name>(m_interestName); nameWithSequence->appendSequenceNumber(seq); // // shared_ptr<Interest> interest = make_shared<Interest> (); shared_ptr<Interest> interest = make_shared<Interest>(); interest->setNonce(m_rand->GetValue(0, std::numeric_limits<uint32_t>::max())); interest->setName(*nameWithSequence); time::milliseconds interestLifeTime(m_interestLifeTime.GetMilliSeconds()); interest->setInterestLifetime(interestLifeTime); // NS_LOG_INFO ("Requesting Interest: \n" << *interest); NS_LOG_INFO("> Interest for " << seq); WillSendOutInterest(seq); m_transmittedInterests(interest, this, m_face); m_face->onReceiveInterest(*interest); ScheduleNextPacket(); }
/**
 * Handle a received Data packet: start signature validation, update delay
 * and hop-count traces for its sequence number, and clear all retransmission
 * bookkeeping for that sequence.
 *
 * \param data the received Data packet
 */
void
VerifyingConsumer::OnData(shared_ptr<const Data> data)
{
  if (!m_active)
    return;

  App::OnData(data); // tracing inside

  NS_LOG_FUNCTION(this << data);

  // Kick off asynchronous signature validation; the outcome is reported
  // through the two callbacks.
  m_validator->validate(*data, bind(&VerifyingConsumer::ValidationPassed, this, _1),
                        bind(&VerifyingConsumer::OnDataValidationFailed, this, _1, _2));

  // NS_LOG_INFO ("Received content object: " << boost::cref(*data));

  // This could be a problem...... (assumes the last name component is a
  // sequence number -- throws otherwise)
  uint32_t seq = data->getName().at(-1).toSequenceNumber();
  NS_LOG_INFO("< DATA for " << seq);

  // Recover the forwarding hop count from the ns-3 packet tag, when present.
  int hopCount = 0;
  auto ns3PacketTag = data->getTag<Ns3PacketTag>();
  if (ns3PacketTag != nullptr) { // e.g., packet came from local node's cache
    FwHopCountTag hopCountTag;
    if (ns3PacketTag->getPacket()->PeekPacketTag(hopCountTag)) {
      hopCount = hopCountTag.Get();
      NS_LOG_DEBUG("Hop count: " << hopCount);
    }
  }

  // Fire the delay traces when timing entries exist for this sequence:
  // delay since the last retransmitted Interest ...
  SeqTimeoutsContainer::iterator entry = m_seqLastDelay.find(seq);
  if (entry != m_seqLastDelay.end()) {
    m_lastRetransmittedInterestDataDelay(this, seq, Simulator::Now() - entry->time, hopCount);
  }

  // ... and delay since the very first Interest for this sequence.
  entry = m_seqFullDelay.find(seq);
  if (entry != m_seqFullDelay.end()) {
    m_firstInterestDataDelay(this, seq, Simulator::Now() - entry->time, m_seqRetxCounts[seq], hopCount);
  }

  // The sequence is satisfied: drop all bookkeeping and ack the RTT estimator.
  m_seqRetxCounts.erase(seq);
  m_seqFullDelay.erase(seq);
  m_seqLastDelay.erase(seq);

  m_seqTimeouts.erase(seq);
  m_retxSeqs.erase(seq);

  m_rtt->AckSeq(SequenceNumber32(seq));
}
/**
 * Append one line of downlink PHY transmission statistics to the DL Tx
 * output file (tab-separated; a header row is written on the first call).
 *
 * \param params the per-transmission statistics to record
 *
 * Bug fix: the first write opened GetDlOutputFilename () while the log
 * message, the error handling and every subsequent append all use
 * GetDlTxOutputFilename () -- so the header row went to a different file
 * than the data rows.  The first write now opens GetDlTxOutputFilename ().
 */
void
PhyTxStatsCalculator::DlPhyTransmission (PhyTransmissionStatParameters params)
{
  NS_LOG_FUNCTION (this << params.m_cellId << params.m_imsi << params.m_timestamp
                   << params.m_rnti << params.m_layer << params.m_mcs
                   << params.m_size << params.m_rv << params.m_ndi);
  NS_LOG_INFO ("Write DL Tx Phy Stats in " << GetDlTxOutputFilename ().c_str ());

  std::ofstream outFile;
  if ( m_dlTxFirstWrite == true )
    {
      // First call: create the file and write the column header.
      outFile.open (GetDlTxOutputFilename ().c_str ());
      if (!outFile.is_open ())
        {
          NS_LOG_ERROR ("Can't open file " << GetDlTxOutputFilename ().c_str ());
          return;
        }
      m_dlTxFirstWrite = false;
      //outFile << "% time\tcellId\tIMSI\tRNTI\tlayer\tmcs\tsize\trv\tndi"; // txMode is not available at dl tx side
      outFile << "% time\tcellId\tIMSI\tRNTI\tlayer\tmcs\tsize\trv\tndi";
      outFile << std::endl;
    }
  else
    {
      // Subsequent calls: append to the existing file.
      outFile.open (GetDlTxOutputFilename ().c_str (), std::ios_base::app);
      if (!outFile.is_open ())
        {
          NS_LOG_ERROR ("Can't open file " << GetDlTxOutputFilename ().c_str ());
          return;
        }
    }

  //outFile << Simulator::Now ().GetNanoSeconds () / (double) 1e9 << "\t";
  outFile << params.m_timestamp << "\t";
  outFile << (uint32_t) params.m_cellId << "\t";
  outFile << params.m_imsi << "\t";
  outFile << params.m_rnti << "\t";
  //outFile << (uint32_t) params.m_txMode << "\t"; // txMode is not available at dl tx side
  outFile << (uint32_t) params.m_layer << "\t";
  outFile << (uint32_t) params.m_mcs << "\t";
  outFile << params.m_size << "\t";
  outFile << (uint32_t) params.m_rv << "\t";
  outFile << (uint32_t) params.m_ndi << std::endl;
  outFile.close ();
}
bool CsmaChannel::TransmitEnd () { NS_LOG_FUNCTION (this << m_currentPkt << m_currentSrc); NS_LOG_INFO ("UID is " << m_currentPkt->GetUid () << ")"); NS_ASSERT (m_state == TRANSMITTING); m_state = PROPAGATING; bool retVal = true; if (!IsActive (m_currentSrc)) { NS_LOG_ERROR ("CsmaChannel::TransmitEnd(): Seclected source was detached before the end of the transmission"); retVal = false; } NS_LOG_LOGIC ("Schedule event in " << m_delay.GetSeconds () << " sec"); NS_LOG_LOGIC ("Receive"); std::vector<CsmaDeviceRec>::iterator it; uint32_t devId = 0; for (it = m_deviceList.begin (); it < m_deviceList.end (); it++) { if (it->IsActive ()) { // schedule reception events Simulator::ScheduleWithContext (it->devicePtr->GetNode ()->GetId (), m_delay, &CsmaNetDevice::Receive, it->devicePtr, m_currentPkt->Copy (), m_deviceList[m_currentSrc].devicePtr); } devId++; } // also schedule for the tx side to go back to IDLE Simulator::Schedule (m_delay, &CsmaChannel::PropagationCompleteEvent, this); return retVal; }
/**
 * Constructor of the LTE MIMO system-test suite: registers LenaMimoTestCase
 * instances for the RR and PF schedulers, with and without the TxMode
 * switch enabled, all at 300 m distance.
 */
LenaTestMimoSuite::LenaTestMimoSuite ()
  : TestSuite ("lte-mimo", SYSTEM)
{
  NS_LOG_INFO ("creating LenaMimoTestCase");

  // RR DOWNLINK- DISTANCE 300
  // interval 1 : [0.1, 0.2) sec TxMode 0: MCS 20 -> TB size 1191 bytes
  // interval 2 : [0.3, 0.4) sec TxMode 1: MCS 26 -> TB size 1836 bytes
  // interval 3 : [0.5, 0.6) sec TxMode 2: MCS 18 -> TB size 967 bytes (x2 layers)
  // -->
  std::vector<uint32_t> estThrDl;
  estThrDl.push_back (119100); // interval 1 : estimated throughput for TxMode 0
  estThrDl.push_back (183600); // interval 2 : estimated throughput for TxMode 1
  estThrDl.push_back (193400); // interval 3 : estimated throughput for TxMode 2
  AddTestCase (new LenaMimoTestCase(300, estThrDl, "ns3::RrFfMacScheduler", true), TestCase::QUICK);
  AddTestCase (new LenaMimoTestCase(300, estThrDl, "ns3::PfFfMacScheduler", true), TestCase::QUICK);
  AddTestCase (new LenaMimoTestCase(300, estThrDl, "ns3::RrFfMacScheduler", false), TestCase::QUICK);
  AddTestCase (new LenaMimoTestCase(300, estThrDl, "ns3::PfFfMacScheduler", false), TestCase::QUICK);
}
/**
 * Report whether the first queued packet of the given header type is
 * currently being fragmented.
 *
 * \param packetType the MAC header type to look for
 * \return the m_fragmentation flag of the first matching queue element
 */
bool
WimaxMacQueue::CheckForFragmentation (MacHeaderType::HeaderType packetType)
{
  // Locate the first queued element with a matching header type.
  // NOTE(review): if nothing matches, 'element' holds the last entry
  // examined (default-constructed when the queue is empty), so the flag
  // returned below may not belong to a packet of 'packetType' -- confirm
  // that callers guarantee a matching entry exists.
  QueueElement element;
  for (std::deque<QueueElement>::const_iterator iter = m_queue.begin (); iter != m_queue.end (); ++iter)
    {
      element = *iter;
      if (element.m_hdrType.GetType () == packetType)
        {
          break;
        }
    }

  if (element.m_fragmentation)
    {
      NS_LOG_INFO ("FRAG_DEBUG: CheckForFragmentation"
                   "\n\t\t m_fragmentation is true " << std::endl);
    }
  return element.m_fragmentation;
}
/**
 * Dequeue the next packet awaiting retransmission under a block-ack
 * agreement, filling \p hdr with its MAC header (retry bit set) and
 * choosing the QoS ack policy.
 *
 * \param hdr [out] receives the header of the returned packet
 * \return the packet to retransmit, or 0 when no retry packet is queued
 */
Ptr<const Packet>
BlockAckManager::GetNextPacket (WifiMacHeader &hdr)
{
  NS_LOG_FUNCTION (this << &hdr);
  Ptr<const Packet> packet = 0;
  if (m_retryPackets.size () > 0)
    {
      // Drop stale entries first so the iterator taken below is valid.
      CleanupBuffers ();
      PacketQueueI queueIt = m_retryPackets.front ();
      m_retryPackets.pop_front ();
      packet = queueIt->packet;
      hdr = queueIt->hdr;
      hdr.SetRetry ();
      NS_LOG_INFO ("Retry packet seq=" << hdr.GetSequenceNumber ());
      uint8_t tid = hdr.GetQosTid ();
      Mac48Address recipient = hdr.GetAddr1 ();

      if (ExistsAgreementInState (recipient, tid, OriginatorBlockAckAgreement::ESTABLISHED)
          || SwitchToBlockAckIfNeeded (recipient, tid, hdr.GetSequenceNumber ()))
        {
          // Agreement is (or just became) established: use block ack.
          hdr.SetQosAckPolicy (WifiMacHeader::BLOCK_ACK);
        }
      else
        {
          /* From section 9.10.3 in IEEE802.11e standard:
           * In order to improve efficiency, originators using the Block Ack facility
           * may send MPDU frames with the Ack Policy subfield in QoS control frames
           * set to Normal Ack if only a few MPDUs are available for transmission.[...]
           * When there are sufficient number of MPDUs, the originator may switch back to
           * the use of Block Ack.
           */
          hdr.SetQosAckPolicy (WifiMacHeader::NORMAL_ACK);
          // The packet leaves the block-ack buffer for this agreement.
          AgreementsI i = m_agreements.find (std::make_pair (recipient, tid));
          i->second.second.erase (queueIt);
        }
    }
  return packet;
}
/**
 * Retransmit timeout (RTO) handler: leave fast recovery, halve ssthresh,
 * collapse cwnd to one segment and retransmit from the highest ACKed
 * sequence (RFC 2581 sec. 3.1).
 */
void
TcpNewReno::Retransmit (void)
{
  NS_LOG_FUNCTION (this);
  NS_LOG_LOGIC (this << " ReTxTimeout Expired at time " << Simulator::Now ().GetSeconds ());
  m_inFastRec = false;

  // If erroneous timeout in closed/timed-wait state, just return
  if (m_state == CLOSED || m_state == TIME_WAIT) return;
  // If all data are received (non-closing socket and nothing to send), just return
  if (m_state <= ESTABLISHED && m_txBuffer.HeadSequence () >= m_highTxMark) return;

  // According to RFC2581 sec.3.1, upon RTO, ssthresh is set to half of flight
  // size and cwnd is set to 1*MSS, then the lost packet is retransmitted and
  // TCP back to slow start
  m_ssThresh = std::max (2 * m_segmentSize, BytesInFlight () / 2);
  m_cWnd = m_segmentSize;
  m_nextTxSequence = m_txBuffer.HeadSequence (); // Restart from highest Ack
  NS_LOG_INFO ("RTO. Reset cwnd to " << m_cWnd <<
               ", ssthresh to " << m_ssThresh << ", restart from seqnum " << m_nextTxSequence);
  m_rtt->IncreaseMultiplier ();             // Double the next RTO
  DoRetransmit ();                          // Retransmit the packet
}
/**
 * Start transmitting packet \p p from the device with id \p srcId.
 *
 * \param p     the packet to transmit
 * \param srcId id of the transmitting device in the channel's device list
 * \return true when the channel was IDLE and the source is attached;
 *         false otherwise (the packet is not accepted)
 *
 * Fix: corrected the "Seclected" typo in the error message.
 */
bool
CsmaChannel::TransmitStart (Ptr<Packet> p, uint32_t srcId)
{
  NS_LOG_FUNCTION (this << p << srcId);
  NS_LOG_INFO ("UID is " << p->GetUid () << ")");

  if (m_state != IDLE)
    {
      // Another transmission is in progress; CSMA backoff is the caller's job.
      NS_LOG_WARN ("CsmaChannel::TransmitStart(): State is not IDLE");
      return false;
    }

  if (!IsActive (srcId))
    {
      NS_LOG_ERROR ("CsmaChannel::TransmitStart(): Selected source is not currently attached to network");
      return false;
    }

  NS_LOG_LOGIC ("switch to TRANSMITTING");
  m_currentPkt = p;
  m_currentSrc = srcId;
  m_state = TRANSMITTING;
  return true;
}
/**
 * Periodic sampling callback: record the cumulative number of bytes received
 * at the RLC layer for the given UE flow.
 *
 * \param rlcStats the radio-bearer statistics calculator to query
 * \param imsi     the UE identifier
 * \param lcId     the logical channel identifier
 *
 * Cleanup: the just-appended value is read back with back() instead of
 * at (size () - 1).
 */
void
LenaMimoTestCase::GetRlcBufferSample (Ptr<RadioBearerStatsCalculator> rlcStats, uint64_t imsi, uint8_t lcId)
{
  m_dlDataRxed.push_back (rlcStats->GetDlRxData (imsi, lcId));
  NS_LOG_INFO (Simulator::Now () << "\t get bytes " << m_dlDataRxed.back ());
}
/**
 * Constructor: registers the human-readable name used by the test framework
 * for the GTP-U header serialization/deserialization check.
 */
EpsGtpuHeaderTestCase::EpsGtpuHeaderTestCase ()
  : TestCase ("Check header coding and decoding")
{
  NS_LOG_INFO ("Creating EpsGtpuHeaderTestCase");
}
/**
 * Constructor of the PSS scheduler system-test suite.
 *
 * Registers LenaPssFfMacSchedulerTestCase1 (homogeneous flows, same
 * distance), LenaPssFfMacSchedulerTestCase2 (homogeneous flows, different
 * distances) and a heterogeneous-flow case.  The expected throughput values
 * passed to each test case are derived analytically in the comments below
 * from the MCS/ITBS tables of 3GPP TS 36.213 (comment typos and arithmetic
 * slips in the original derivations are corrected; the code is unchanged).
 */
LenaTestPssFfMacSchedulerSuite::LenaTestPssFfMacSchedulerSuite ()
  : TestSuite ("lte-pss-ff-mac-scheduler", SYSTEM)
{
  NS_LOG_INFO ("creating LenaTestPssFfMacSchedulerSuite");

  bool errorModel = false;

  // General config
  // Traffic: UDP traffic with fixed rate
  // Token generation rate = traffic rate
  // RLC header length = 2 bytes, PDCP header = 2 bytes
  // Simulation time = 1.0 sec
  // Throughput in this file is calculated in RLC layer

  // Test Case 1: homogeneous flow test in PSS (same distance)
  // DOWNLINK -> DISTANCE 0 -> MCS 28 -> Itbs 26 (from table 7.1.7.2.1-1 of 36.213)
  // Traffic info
  //   UDP traffic: payload size = 200 bytes, interval = 1 ms
  //   UDP rate in scheduler: (payload + RLC header + PDCP header + IP header + UDP header) * 1000 byte/sec -> 232000 byte/rate
  // Total bandwidth: 24 PRB at Itbs 26 -> 2196 -> 2196000 byte/sec
  // 1 user -> 232000 * 1 = 232000 < 2196000 -> throughput = 232000 byte/sec
  // 3 user -> 232000 * 3 = 696000 < 2196000 -> throughput = 232000 byte/sec
  // 6 user -> 232000 * 6 = 1392000 < 2196000 -> throughput = 232000 byte/sec
  // 12 user -> 232000 * 12 = 2784000 > 2196000 -> throughput = 2196000 / 12 = 183000 byte/sec
  // UPLINK -> DISTANCE 0 -> MCS 28 -> Itbs 26 (from table 7.1.7.2.1-1 of 36.213)
  // 1 user -> 25 PRB at Itbs 26 -> 2292 -> 2292000 > 232000 -> throughput = 232000 bytes/sec
  // 3 users -> 8 PRB at Itbs 26 -> 749 -> 749000 > 232000 -> throughput = 232000 bytes/sec
  // 6 users -> 4 PRB at Itbs 26 -> 373 -> 373000 > 232000 -> throughput = 232000 bytes/sec
  // 12 users -> 2 PRB at Itbs 26 -> 185 -> 185000 < 232000 -> throughput = 185000 bytes/sec
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (1,0,232000,232000,200,1,errorModel), TestCase::EXTENSIVE);
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (3,0,232000,232000,200,1,errorModel), TestCase::EXTENSIVE);
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (6,0,232000,232000,200,1,errorModel), TestCase::EXTENSIVE);
  //AddTestCase (new LenaPssFfMacSchedulerTestCase1 (12,0,183000,185000,200,1,errorModel));// simulation time = 1.5, otherwise, ul test will fail

  // DOWNLINK - DISTANCE 4800 -> MCS 22 -> Itbs 20 (from table 7.1.7.2.1-1 of 36.213)
  // Traffic info
  //   UDP traffic: payload size = 200 bytes, interval = 1 ms
  //   UDP rate in scheduler: (payload + RLC header + PDCP header + IP header + UDP header) * 1000 byte/sec -> 232000 byte/rate
  // Total bandwidth: 24 PRB at Itbs 20 -> 1383 -> 1383000 byte/sec
  // 1 user -> 232000 * 1 = 232000 < 1383000 -> throughput = 232000 byte/sec
  // 3 user -> 232000 * 3 = 696000 < 1383000 -> throughput = 232000 byte/sec
  // 6 user -> 232000 * 6 = 1392000 > 1383000 -> throughput = 1383000 / 6 = 230500 byte/sec
  // 12 user -> 232000 * 12 = 2784000 > 1383000 -> throughput = 1383000 / 12 = 115250 byte/sec
  // UPLINK - DISTANCE 4800 -> MCS 14 -> Itbs 13 (from table 7.1.7.2.1-1 of 36.213)
  // 1 user -> 25 PRB at Itbs 13 -> 807 -> 807000 > 232000 -> throughput = 232000 bytes/sec
  // 3 users -> 8 PRB at Itbs 13 -> 253 -> 253000 > 232000 -> throughput = 232000 bytes/sec
  // 6 users -> 4 PRB at Itbs 13 -> 125 -> 125000 < 232000 -> throughput = 125000 bytes/sec
  // after the patch enforcing min 3 PRBs per UE:
  // 12 users -> 3 PRB at Itbs 13 -> 93 bytes * 8/12 UE/TTI -> 62000 < 232000 -> throughput = 62000 bytes/sec
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (1,4800,232000,232000,200,1,errorModel), TestCase::EXTENSIVE);
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (3,4800,232000,232000,200,1,errorModel), TestCase::EXTENSIVE);
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (6,4800,230500,125000,200,1,errorModel), TestCase::EXTENSIVE);
  //AddTestCase (new LenaPssFfMacSchedulerTestCase1 (12,4800,115250,62000,200,1,errorModel)); // simulation time = 1.5, otherwise, ul test will fail

  // DOWNLINK - DISTANCE 6000 -> MCS 20 -> Itbs 18 (from table 7.1.7.2.1-1 of 36.213)
  // Traffic info
  //   UDP traffic: payload size = 200 bytes, interval = 1 ms
  //   UDP rate in scheduler: (payload + RLC header + PDCP header + IP header + UDP header) * 1000 byte/sec -> 232000 byte/rate
  // Total bandwidth: 24 PRB at Itbs 18 -> 1191 -> 1191000 byte/sec
  // 1 user -> 232000 * 1 = 232000 < 1191000 -> throughput = 232000 byte/sec
  // 3 user -> 232000 * 3 = 696000 < 1191000 -> throughput = 232000 byte/sec
  // 6 user -> 232000 * 6 = 1392000 > 1191000 -> throughput = 1191000 / 6 = 198500 byte/sec
  // 12 user -> 232000 * 12 = 2784000 > 1191000 -> throughput = 1191000 / 12 = 99250 byte/sec
  // UPLINK - DISTANCE 6000 -> MCS 12 -> Itbs 11 (from table 7.1.7.2.1-1 of 36.213)
  // 1 user -> 25 PRB at Itbs 11 -> 621 -> 621000 > 232000 -> throughput = 232000 bytes/sec
  // 3 users -> 8 PRB at Itbs 11 -> 201 -> 201000 < 232000 -> throughput = 201000 bytes/sec
  // 6 users -> 4 PRB at Itbs 11 -> 97 -> 97000 < 232000 -> throughput = 97000 bytes/sec
  // after the patch enforcing min 3 PRBs per UE:
  // 12 users -> 3 PRB at Itbs 11 -> 73 bytes * 8/12 UE/TTI -> 48667 < 232000 -> throughput = 48667 bytes/sec
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (1,6000,232000,232000,200,1,errorModel), TestCase::EXTENSIVE);
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (3,6000,232000,201000,200,1,errorModel), TestCase::EXTENSIVE);
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (6,6000,198500,97000,200,1,errorModel), TestCase::EXTENSIVE);
  //AddTestCase (new LenaPssFfMacSchedulerTestCase1 (12,6000,99250,48667,200,1, errorModel)); // simulation time = 1.5, otherwise, ul test will fail

  // DOWNLINK - DISTANCE 10000 -> MCS 14 -> Itbs 13 (from table 7.1.7.2.1-1 of 36.213)
  // Traffic info
  //   UDP traffic: payload size = 200 bytes, interval = 1 ms
  //   UDP rate in scheduler: (payload + RLC header + PDCP header + IP header + UDP header) * 1000 byte/sec -> 232000 byte/rate
  // Total bandwidth: 24 PRB at Itbs 13 -> 775 -> 775000 byte/sec
  // 1 user -> 232000 * 1 = 232000 < 775000 -> throughput = 232000 byte/sec
  // 3 user -> 232000 * 3 = 696000 < 775000 -> throughput = 232000 byte/sec
  // 6 user -> 232000 * 6 = 1392000 > 775000 -> throughput = 775000 / 6 = 129166 byte/sec
  // 12 user -> 232000 * 12 = 2784000 > 775000 -> throughput = 775000 / 12 = 64583 byte/sec
  // UPLINK - DISTANCE 10000 -> MCS 8 -> Itbs 8 (from table 7.1.7.2.1-1 of 36.213)
  // 1 user -> 24 PRB at Itbs 8 -> 437 -> 437000 > 232000 -> throughput = 232000 bytes/sec
  // 3 users -> 8 PRB at Itbs 8 -> 137 -> 137000 < 232000 -> throughput = 137000 bytes/sec
  // 6 users -> 4 PRB at Itbs 8 -> 67 -> 67000 < 232000 -> throughput = 67000 bytes/sec
  // after the patch enforcing min 3 PRBs per UE:
  // 12 users -> 3 PRB at Itbs 8 -> 49 bytes * 8/12 UE/TTI -> 32667 < 232000 -> throughput = 32667 bytes/sec
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (1,10000,232000,232000,200,1,errorModel), TestCase::EXTENSIVE);
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (3,10000,232000,137000,200,1,errorModel), TestCase::EXTENSIVE);
  AddTestCase (new LenaPssFfMacSchedulerTestCase1 (6,10000,129166,67000,200,1,errorModel), TestCase::EXTENSIVE);
  //AddTestCase (new LenaPssFfMacSchedulerTestCase1 (12,10000,64583,32667,200,1,errorModel));// simulation time = 1.5, otherwise, ul test will fail

  // Test Case 2: homogeneous flow test in PSS (different distance)
  // Traffic1 info
  //   UDP traffic: payload size = 100 bytes, interval = 1 ms
  //   UDP rate in scheduler: (payload + RLC header + PDCP header + IP header + UDP header) * 1000 byte/sec -> 132000 byte/rate
  // Maximum throughput = 4 / ( 1/2196000 + 1/1191000 + 1/1383000 + 1/775000 ) = 1209046 byte/s
  // 132000 * 4 = 528000 < 1209046 -> estimated throughput in downlink = 132000 byte/sec
  std::vector<uint16_t> dist1;
  dist1.push_back (0);       // User 0 distance --> MCS 28
  dist1.push_back (4800);    // User 1 distance --> MCS 22
  dist1.push_back (6000);    // User 2 distance --> MCS 20
  dist1.push_back (10000);   // User 3 distance --> MCS 14
  std::vector<uint16_t> packetSize1;
  packetSize1.push_back (100);
  packetSize1.push_back (100);
  packetSize1.push_back (100);
  packetSize1.push_back (100);
  std::vector<uint32_t> estThrPssDl1;
  estThrPssDl1.push_back (132000); // User 0 estimated TTI throughput from PSS
  estThrPssDl1.push_back (132000); // User 1 estimated TTI throughput from PSS
  estThrPssDl1.push_back (132000); // User 2 estimated TTI throughput from PSS
  estThrPssDl1.push_back (132000); // User 3 estimated TTI throughput from PSS
  AddTestCase (new LenaPssFfMacSchedulerTestCase2 (dist1,estThrPssDl1,packetSize1,1,errorModel), TestCase::QUICK);

  // Traffic2 info
  //   UDP traffic: payload size = 200 bytes, interval = 1 ms
  //   UDP rate in scheduler: (payload + RLC header + PDCP header + IP header + UDP header) * 1000 byte/sec -> 232000 byte/rate
  // Maximum throughput = 4 / ( 1/2196000 + 1/1191000 + 1/1383000 + 1/775000 ) = 1209046 byte/s
  // 232000 * 4 = 928000 < 1209046 -> estimated throughput in downlink = 928000 / 4 = 230000 byte/sec
  std::vector<uint16_t> dist2;
  dist2.push_back (0);       // User 0 distance --> MCS 28
  dist2.push_back (4800);    // User 1 distance --> MCS 22
  dist2.push_back (6000);    // User 2 distance --> MCS 20
  dist2.push_back (10000);   // User 3 distance --> MCS 14
  std::vector<uint16_t> packetSize2;
  packetSize2.push_back (200);
  packetSize2.push_back (200);
  packetSize2.push_back (200);
  packetSize2.push_back (200);
  std::vector<uint32_t> estThrPssDl2;
  estThrPssDl2.push_back (230000); // User 0 estimated TTI throughput from PSS
  estThrPssDl2.push_back (230000); // User 1 estimated TTI throughput from PSS
  estThrPssDl2.push_back (230000); // User 2 estimated TTI throughput from PSS
  estThrPssDl2.push_back (230000); // User 3 estimated TTI throughput from PSS
  AddTestCase (new LenaPssFfMacSchedulerTestCase2 (dist2,estThrPssDl2,packetSize2,1,errorModel), TestCase::QUICK);

  // Test Case 3: heterogeneous flow test in PSS
  //   UDP traffic: payload size = [100,200,300] bytes, interval = 1 ms
  //   UDP rate in scheduler: (payload + RLC header + PDCP header + IP header + UDP header) * 1000 byte/sec -> [132000, 232000, 332000] byte/rate
  // Maximum throughput = 3 / ( 1/2196000 + 1/1191000 + 1/1383000) = 1486569 byte/s
  // 132000 + 232000 + 332000 = 696000 < 1486569 -> estimated throughput in downlink = [132000, 232000, 332000] byte/sec
  std::vector<uint16_t> dist3;
  dist3.push_back (0);       // User 0 distance --> MCS 28
  dist3.push_back (4800);    // User 1 distance --> MCS 22
  dist3.push_back (6000);    // User 2 distance --> MCS 20
  std::vector<uint16_t> packetSize3;
  packetSize3.push_back (100);
  packetSize3.push_back (200);
  packetSize3.push_back (300);
  std::vector<uint32_t> estThrPssDl3;
  estThrPssDl3.push_back (132000); // User 0 estimated TTI throughput from PSS
  estThrPssDl3.push_back (232000); // User 1 estimated TTI throughput from PSS
  estThrPssDl3.push_back (332000); // User 2 estimated TTI throughput from PSS
  AddTestCase (new LenaPssFfMacSchedulerTestCase2 (dist3,estThrPssDl3,packetSize3,1,errorModel), TestCase::QUICK);
}
/**
 * Run one PSS FF MAC scheduler test case: build a full LTE + EPC scenario
 * (one eNB, m_nUser UEs placed at the distances in m_dist), drive GBR UDP
 * traffic in both directions, and verify that the measured per-user
 * downlink RLC throughput matches the reference values in m_estThrPssDl
 * within a 10% tolerance.
 */
void
LenaPssFfMacSchedulerTestCase2::DoRun (void)
{
  // Optionally disable the PHY error models so decoding is deterministic.
  if (!m_errorModelEnabled)
    {
      Config::SetDefault ("ns3::LteSpectrumPhy::CtrlErrorModelEnabled", BooleanValue (false));
      Config::SetDefault ("ns3::LteSpectrumPhy::DataErrorModelEnabled", BooleanValue (false));
    }

  Config::SetDefault ("ns3::LteHelper::UseIdealRrc", BooleanValue (true));

  Ptr<LteHelper> lteHelper = CreateObject<LteHelper> ();
  Ptr<PointToPointEpcHelper> epcHelper = CreateObject<PointToPointEpcHelper> ();
  lteHelper->SetEpcHelper (epcHelper);

  Ptr<Node> pgw = epcHelper->GetPgwNode ();

  // Create a single RemoteHost
  NodeContainer remoteHostContainer;
  remoteHostContainer.Create (1);
  Ptr<Node> remoteHost = remoteHostContainer.Get (0);
  InternetStackHelper internet;
  internet.Install (remoteHostContainer);

  // Create the Internet (P-GW <-> remote host point-to-point link)
  PointToPointHelper p2ph;
  p2ph.SetDeviceAttribute ("DataRate", DataRateValue (DataRate ("100Gb/s")));
  p2ph.SetDeviceAttribute ("Mtu", UintegerValue (1500));
  p2ph.SetChannelAttribute ("Delay", TimeValue (Seconds (0.001)));
  NetDeviceContainer internetDevices = p2ph.Install (pgw, remoteHost);
  Ipv4AddressHelper ipv4h;
  ipv4h.SetBase ("1.0.0.0", "255.0.0.0");
  Ipv4InterfaceContainer internetIpIfaces = ipv4h.Assign (internetDevices);
  // interface 0 is localhost, 1 is the p2p device
  Ipv4Address remoteHostAddr = internetIpIfaces.GetAddress (1);

  // Route the UE subnet (7.0.0.0/8) back through the p2p interface.
  Ipv4StaticRoutingHelper ipv4RoutingHelper;
  Ptr<Ipv4StaticRouting> remoteHostStaticRouting = ipv4RoutingHelper.GetStaticRouting (remoteHost->GetObject<Ipv4> ());
  remoteHostStaticRouting->AddNetworkRouteTo (Ipv4Address ("7.0.0.0"), Ipv4Mask ("255.0.0.0"), 1);

  // LogComponentDisableAll (LOG_LEVEL_ALL);
  //LogComponentEnable ("LenaTestPssFfMacCheduler", LOG_LEVEL_ALL);

  lteHelper->SetAttribute ("PathlossModel", StringValue ("ns3::FriisSpectrumPropagationLossModel"));

  // Create Nodes: eNodeB and UE
  NodeContainer enbNodes;
  NodeContainer ueNodes;
  enbNodes.Create (1);
  ueNodes.Create (m_nUser);

  // Install Mobility Model (all nodes static; UE positions set below)
  MobilityHelper mobility;
  mobility.SetMobilityModel ("ns3::ConstantPositionMobilityModel");
  mobility.Install (enbNodes);
  mobility.SetMobilityModel ("ns3::ConstantPositionMobilityModel");
  mobility.Install (ueNodes);

  // Create Devices and install them in the Nodes (eNB and UE),
  // using the scheduler under test.
  NetDeviceContainer enbDevs;
  NetDeviceContainer ueDevs;
  lteHelper->SetSchedulerType ("ns3::PssFfMacScheduler");
  enbDevs = lteHelper->InstallEnbDevice (enbNodes);
  ueDevs = lteHelper->InstallUeDevice (ueNodes);

  Ptr<LteEnbNetDevice> lteEnbDev = enbDevs.Get (0)->GetObject<LteEnbNetDevice> ();
  Ptr<LteEnbPhy> enbPhy = lteEnbDev->GetPhy ();
  enbPhy->SetAttribute ("TxPower", DoubleValue (30.0));
  enbPhy->SetAttribute ("NoiseFigure", DoubleValue (5.0));

  // Set UEs' position and power; distance determines the MCS each UE gets.
  for (int i = 0; i < m_nUser; i++)
    {
      Ptr<ConstantPositionMobilityModel> mm = ueNodes.Get (i)->GetObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (m_dist.at (i), 0.0, 0.0));
      Ptr<LteUeNetDevice> lteUeDev = ueDevs.Get (i)->GetObject<LteUeNetDevice> ();
      Ptr<LteUePhy> uePhy = lteUeDev->GetPhy ();
      uePhy->SetAttribute ("TxPower", DoubleValue (23.0));
      uePhy->SetAttribute ("NoiseFigure", DoubleValue (9.0));
    }

  // Install the IP stack on the UEs
  internet.Install (ueNodes);
  Ipv4InterfaceContainer ueIpIface;
  ueIpIface = epcHelper->AssignUeIpv4Address (NetDeviceContainer (ueDevs));

  // Assign IP address to UEs
  for (uint32_t u = 0; u < ueNodes.GetN (); ++u)
    {
      Ptr<Node> ueNode = ueNodes.Get (u);
      // Set the default gateway for the UE
      Ptr<Ipv4StaticRouting> ueStaticRouting = ipv4RoutingHelper.GetStaticRouting (ueNode->GetObject<Ipv4> ());
      ueStaticRouting->SetDefaultRoute (epcHelper->GetUeDefaultGatewayAddress (), 1);
    }

  // Attach a UE to a eNB
  lteHelper->Attach (ueDevs, enbDevs.Get (0));

  // Activate an EPS bearer on all UEs, with GBR/MBR sized to the
  // offered UDP rate of each flow.
  for (uint32_t u = 0; u < ueNodes.GetN (); ++u)
    {
      Ptr<NetDevice> ueDevice = ueDevs.Get (u);
      GbrQosInformation qos;
      qos.gbrDl = (m_packetSize.at (u) + 32) * (1000 / m_interval) * 8; // bit/s, considering IP, UDP, RLC, PDCP header size
      qos.gbrUl = (m_packetSize.at (u) + 32) * (1000 / m_interval) * 8;
      qos.mbrDl = qos.gbrDl;
      qos.mbrUl = qos.gbrUl;
      enum EpsBearer::Qci q = EpsBearer::GBR_CONV_VOICE;
      EpsBearer bearer (q, qos);
      lteHelper->ActivateDedicatedEpsBearer (ueDevice, bearer, EpcTft::Default ());
    }

  // Install downlink and uplink applications
  uint16_t dlPort = 1234;
  uint16_t ulPort = 2000;
  PacketSinkHelper dlPacketSinkHelper ("ns3::UdpSocketFactory", InetSocketAddress (Ipv4Address::GetAny (), dlPort));
  PacketSinkHelper ulPacketSinkHelper ("ns3::UdpSocketFactory", InetSocketAddress (Ipv4Address::GetAny (), ulPort));
  ApplicationContainer clientApps;
  ApplicationContainer serverApps;
  for (uint32_t u = 0; u < ueNodes.GetN (); ++u)
    {
      // NOTE(review): the ul sink above was bound to ulPort == 2000 before
      // this increment, while the ul clients below target 2001, 2002, ... —
      // so uplink packets are not delivered to a sink. Uplink throughput is
      // not asserted in this test, so the DL checks are unaffected; confirm
      // whether the UL traffic is only meant to load the scheduler.
      ++ulPort;
      serverApps.Add (dlPacketSinkHelper.Install (ueNodes.Get (u))); // receive packets from remotehost
      serverApps.Add (ulPacketSinkHelper.Install (remoteHost)); // receive packets from UEs
      UdpClientHelper dlClient (ueIpIface.GetAddress (u), dlPort); // downlink packets generator
      dlClient.SetAttribute ("Interval", TimeValue (MilliSeconds (m_interval)));
      dlClient.SetAttribute ("MaxPackets", UintegerValue (1000000));
      dlClient.SetAttribute ("PacketSize", UintegerValue (m_packetSize.at (u)));
      UdpClientHelper ulClient (remoteHostAddr, ulPort); // uplink packets generator
      ulClient.SetAttribute ("Interval", TimeValue (MilliSeconds (m_interval)));
      ulClient.SetAttribute ("MaxPackets", UintegerValue (1000000));
      ulClient.SetAttribute ("PacketSize", UintegerValue (m_packetSize.at (u)));
      clientApps.Add (dlClient.Install (remoteHost));
      clientApps.Add (ulClient.Install (ueNodes.Get (u)));
    }

  serverApps.Start (Seconds (0.030));
  clientApps.Start (Seconds (0.030));

  double statsStartTime = 0.04; // need to allow for RRC connection establishment + SRS
  double statsDuration = 0.5;
  double tolerance = 0.1;
  Simulator::Stop (Seconds (statsStartTime + statsDuration - 0.0001));

  // Collect RLC-level throughput statistics over [statsStartTime,
  // statsStartTime + statsDuration).
  lteHelper->EnableRlcTraces ();
  Ptr<RadioBearerStatsCalculator> rlcStats = lteHelper->GetRlcStats ();
  rlcStats->SetAttribute ("StartTime", TimeValue (Seconds (statsStartTime)));
  rlcStats->SetAttribute ("EpochDuration", TimeValue (Seconds (statsDuration)));

  Simulator::Run ();

  /**
   * Check that the downlink assignment is done in a "token bank fair queue" manner
   */
  NS_LOG_INFO ("DL - Test with " << m_nUser << " user(s)");
  std::vector <uint64_t> dlDataRxed;
  for (int i = 0; i < m_nUser; i++)
    {
      // get the imsi
      uint64_t imsi = ueDevs.Get (i)->GetObject<LteUeNetDevice> ()->GetImsi ();
      // get the lcId (the dedicated bearer activated above)
      uint8_t lcId = 4;
      dlDataRxed.push_back (rlcStats->GetDlRxData (imsi, lcId));
      NS_LOG_INFO ("\tUser " << i << " dist " << m_dist.at (i) << " imsi " << imsi << " bytes rxed " << (double)dlDataRxed.at (i) << " thr " << (double)dlDataRxed.at (i) / statsDuration << " ref " << m_estThrPssDl.at (i));
    }

  // Per-user DL throughput must match the reference within +/- 10%.
  for (int i = 0; i < m_nUser; i++)
    {
      NS_TEST_ASSERT_MSG_EQ_TOL ((double)dlDataRxed.at (i) / statsDuration, m_estThrPssDl.at (i), m_estThrPssDl.at (i) * tolerance, " Unfair Throughput!");
    }

  Simulator::Destroy ();
}
void AccountingConsumer::SendPacket() { if (!m_active) return; NS_LOG_FUNCTION_NOARGS(); uint32_t seq = std::numeric_limits<uint32_t>::max(); // invalid while (m_retxSeqs.size()) { seq = *m_retxSeqs.begin(); m_retxSeqs.erase(m_retxSeqs.begin()); NS_LOG_DEBUG("=interest seq " << seq << " from m_retxSeqs"); break; } seq = 0; shared_ptr<Name> nameWithSequence = make_shared<Name>(m_interestName); nameWithSequence->appendSequenceNumber(seq); shared_ptr<Interest> interest = make_shared<Interest>(); interest->setNonce(m_rand.GetValue()); interest->setName(*nameWithSequence); // Note: we're setting this randomly, just to test the encoding/decoding std::vector<uint64_t> payload; payload.push_back(seq); payload.push_back(seq); payload.push_back(seq); payload.push_back(seq); interest->setPayload(payload); // NS_LOG_INFO ("Requesting Interest: \n" << *interest); // std::cout << "> " << m_id << ": Interest for " << seq << ", Total: " << m_seq << ", face: " << m_face->getId() << std::endl; NS_LOG_INFO("> Interest for " << seq << ", Total: " << m_seq << ", face: " << m_face->getId()); NS_LOG_DEBUG("Trying to add " << seq << " with " << Simulator::Now() << ". already " << m_seqTimeouts.size() << " items"); m_seqTimeouts.insert(SeqTimeout(seq, Simulator::Now())); m_seqFullDelay.insert(SeqTimeout(seq, Simulator::Now())); m_seqLastDelay.erase(seq); m_seqLastDelay.insert(SeqTimeout(seq, Simulator::Now())); m_seqRetxCounts[seq]++; m_rtt->SentSeq(SequenceNumber32(seq), 1); m_transmittedInterests(interest, this, m_face); m_face->onReceiveInterest(*interest); NameTime *nt = new NameTime(interest->getName(), Simulator::Now(), Simulator::Now()); startTimes.push_back(nt); AccountingConsumer::ScheduleNextPacket(); sentCount++; //std::cout << "> Consumer(" << GetNode()->GetId() << ") is sending interest, " // << nameWithSequence->toUri() << " and nonce " << interest->getNonce() << std::endl; }