template<class T, class U>
void
DatagramTransport<T, U>::receiveDatagram(const uint8_t* buffer, size_t nBytesReceived,
                                         const boost::system::error_code& error)
{
  if (error)
    return processErrorCode(error);

  NFD_LOG_FACE_TRACE("Received: " << nBytesReceived << " bytes from " << m_sender);

  bool isOk = false;
  Block element;
  std::tie(isOk, element) = Block::fromBuffer(buffer, nBytesReceived);
  if (!isOk) {
    NFD_LOG_FACE_WARN("Failed to parse incoming packet from " << m_sender);
    // This packet won't extend the face lifetime
    return;
  }
  if (element.size() != nBytesReceived) {
    NFD_LOG_FACE_WARN("Received datagram size and decoded element size don't match");
    // This packet won't extend the face lifetime
    return;
  }
  m_hasRecentlyReceived = true;

  Transport::Packet tp(std::move(element));
  tp.remoteEndpoint = makeEndpointId(m_sender);
  this->receive(std::move(tp));
}
void
GenericLinkService::decodeNetPacket(const Block& netPkt, const lp::Packet& firstPkt,
                                    const EndpointId& endpointId)
{
  try {
    switch (netPkt.type()) {
      case tlv::Interest:
        if (firstPkt.has<lp::NackField>()) {
          this->decodeNack(netPkt, firstPkt, endpointId);
        }
        else {
          this->decodeInterest(netPkt, firstPkt, endpointId);
        }
        break;
      case tlv::Data:
        this->decodeData(netPkt, firstPkt, endpointId);
        break;
      default:
        ++this->nInNetInvalid;
        NFD_LOG_FACE_WARN("unrecognized network-layer packet TLV-TYPE " << netPkt.type() << ": DROP");
        return;
    }
  }
  catch (const tlv::Error& e) {
    ++this->nInNetInvalid;
    NFD_LOG_FACE_WARN("packet parse error (" << e.what() << "): DROP");
  }
}
void
GenericLinkService::doReceivePacket(const Block& packet, const EndpointId& endpoint)
{
  try {
    lp::Packet pkt(packet);

    if (m_options.reliabilityOptions.isEnabled) {
      m_reliability.processIncomingPacket(pkt);
    }

    if (!pkt.has<lp::FragmentField>()) {
      NFD_LOG_FACE_TRACE("received IDLE packet: DROP");
      return;
    }

    if ((pkt.has<lp::FragIndexField>() || pkt.has<lp::FragCountField>()) &&
        !m_options.allowReassembly) {
      NFD_LOG_FACE_WARN("received fragment, but reassembly disabled: DROP");
      return;
    }

    bool isReassembled = false;
    Block netPkt;
    lp::Packet firstPkt;
    std::tie(isReassembled, netPkt, firstPkt) = m_reassembler.receiveFragment(endpoint, pkt);
    if (isReassembled) {
      this->decodeNetPacket(netPkt, firstPkt, endpoint);
    }
  }
  catch (const tlv::Error& e) {
    ++this->nInLpInvalid;
    NFD_LOG_FACE_WARN("packet parse error (" << e.what() << "): DROP");
  }
}
void
GenericLinkService::decodeData(const Block& netPkt, const lp::Packet& firstPkt,
                               const EndpointId& endpointId)
{
  BOOST_ASSERT(netPkt.type() == tlv::Data);

  // forwarding expects Data to be created with make_shared
  auto data = make_shared<Data>(netPkt);

  if (firstPkt.has<lp::NackField>()) {
    ++this->nInNetInvalid;
    NFD_LOG_FACE_WARN("received Nack with Data: DROP");
    return;
  }

  if (firstPkt.has<lp::NextHopFaceIdField>()) {
    ++this->nInNetInvalid;
    NFD_LOG_FACE_WARN("received NextHopFaceId with Data: DROP");
    return;
  }

  if (firstPkt.has<lp::CachePolicyField>()) {
    // CachePolicy is unprivileged and does not require allowLocalFields option.
    // In case of an invalid CachePolicyType, get<lp::CachePolicyField> will throw,
    // so it's unnecessary to check here.
    data->setTag(make_shared<lp::CachePolicyTag>(firstPkt.get<lp::CachePolicyField>()));
  }

  if (firstPkt.has<lp::IncomingFaceIdField>()) {
    NFD_LOG_FACE_WARN("received IncomingFaceId: IGNORE");
  }

  if (firstPkt.has<lp::CongestionMarkField>()) {
    data->setTag(make_shared<lp::CongestionMarkTag>(firstPkt.get<lp::CongestionMarkField>()));
  }

  if (firstPkt.has<lp::NonDiscoveryField>()) {
    ++this->nInNetInvalid;
    NFD_LOG_FACE_WARN("received NonDiscovery with Data: DROP");
    return;
  }

  if (firstPkt.has<lp::PrefixAnnouncementField>()) {
    if (m_options.allowSelfLearning) {
      data->setTag(make_shared<lp::PrefixAnnouncementTag>(firstPkt.get<lp::PrefixAnnouncementField>()));
    }
    else {
      NFD_LOG_FACE_WARN("received PrefixAnnouncement, but self-learning disabled: IGNORE");
    }
  }

  this->receiveData(*data, endpointId);
}
void
GenericLinkService::decodeInterest(const Block& netPkt, const lp::Packet& firstPkt,
                                   const EndpointId& endpointId)
{
  BOOST_ASSERT(netPkt.type() == tlv::Interest);
  BOOST_ASSERT(!firstPkt.has<lp::NackField>());

  // forwarding expects Interest to be created with make_shared
  auto interest = make_shared<Interest>(netPkt);

  if (firstPkt.has<lp::NextHopFaceIdField>()) {
    if (m_options.allowLocalFields) {
      interest->setTag(make_shared<lp::NextHopFaceIdTag>(firstPkt.get<lp::NextHopFaceIdField>()));
    }
    else {
      NFD_LOG_FACE_WARN("received NextHopFaceId, but local fields disabled: DROP");
      return;
    }
  }

  if (firstPkt.has<lp::CachePolicyField>()) {
    ++this->nInNetInvalid;
    NFD_LOG_FACE_WARN("received CachePolicy with Interest: DROP");
    return;
  }

  if (firstPkt.has<lp::IncomingFaceIdField>()) {
    NFD_LOG_FACE_WARN("received IncomingFaceId: IGNORE");
  }

  if (firstPkt.has<lp::CongestionMarkField>()) {
    interest->setTag(make_shared<lp::CongestionMarkTag>(firstPkt.get<lp::CongestionMarkField>()));
  }

  if (firstPkt.has<lp::NonDiscoveryField>()) {
    if (m_options.allowSelfLearning) {
      interest->setTag(make_shared<lp::NonDiscoveryTag>(firstPkt.get<lp::NonDiscoveryField>()));
    }
    else {
      NFD_LOG_FACE_WARN("received NonDiscovery, but self-learning disabled: IGNORE");
    }
  }

  if (firstPkt.has<lp::PrefixAnnouncementField>()) {
    ++this->nInNetInvalid;
    NFD_LOG_FACE_WARN("received PrefixAnnouncement with Interest: DROP");
    return;
  }

  this->receiveInterest(*interest, endpointId);
}
UdpFace::UdpFace(const FaceUri& remoteUri, const FaceUri& localUri,
                 protocol::socket socket, ndn::nfd::FacePersistency persistency,
                 const time::seconds& idleTimeout)
  : DatagramFace(remoteUri, localUri, std::move(socket))
  , m_idleTimeout(idleTimeout)
  , m_lastIdleCheck(time::steady_clock::now())
{
  this->setPersistency(persistency);

#ifdef __linux__
  //
  // By default, Linux does path MTU discovery on IPv4 sockets,
  // and sets the DF (Don't Fragment) flag on datagrams smaller
  // than the interface MTU. However this does not work for us,
  // because we cannot properly respond to ICMP "packet too big"
  // messages by fragmenting the packet at the application level,
  // since we want to rely on IP for fragmentation and reassembly.
  //
  // Therefore, we disable PMTU discovery, which prevents the kernel
  // from setting the DF flag on outgoing datagrams, and thus allows
  // routers along the path to perform fragmentation as needed.
  //
  const int value = IP_PMTUDISC_DONT;
  if (::setsockopt(m_socket.native_handle(), IPPROTO_IP,
                   IP_MTU_DISCOVER, &value, sizeof(value)) < 0) {
    NFD_LOG_FACE_WARN("Failed to disable path MTU discovery: " << std::strerror(errno));
  }
#endif

  if (this->getPersistency() == ndn::nfd::FACE_PERSISTENCY_ON_DEMAND &&
      m_idleTimeout > time::seconds::zero()) {
    m_closeIfIdleEvent = scheduler::schedule(m_idleTimeout, bind(&UdpFace::closeIfIdle, this));
  }
}
void
WebSocketTransport::handlePongTimeout()
{
  NFD_LOG_FACE_WARN(__func__);
  this->setState(TransportState::FAILED);
  doClose();
}
template<class T, class U>
ssize_t
DatagramTransport<T, U>::getSendQueueLength()
{
  ssize_t queueLength = getTxQueueLength(m_socket.native_handle());
  if (queueLength == QUEUE_ERROR) {
    NFD_LOG_FACE_WARN("Failed to obtain send queue length from socket: " << std::strerror(errno));
  }
  return queueLength;
}
void
GenericLinkService::decodeNack(const Block& netPkt, const lp::Packet& firstPkt,
                               const EndpointId& endpointId)
{
  BOOST_ASSERT(netPkt.type() == tlv::Interest);
  BOOST_ASSERT(firstPkt.has<lp::NackField>());

  lp::Nack nack((Interest(netPkt)));
  nack.setHeader(firstPkt.get<lp::NackField>());

  if (firstPkt.has<lp::NextHopFaceIdField>()) {
    ++this->nInNetInvalid;
    NFD_LOG_FACE_WARN("received NextHopFaceId with Nack: DROP");
    return;
  }

  if (firstPkt.has<lp::CachePolicyField>()) {
    ++this->nInNetInvalid;
    NFD_LOG_FACE_WARN("received CachePolicy with Nack: DROP");
    return;
  }

  if (firstPkt.has<lp::IncomingFaceIdField>()) {
    NFD_LOG_FACE_WARN("received IncomingFaceId: IGNORE");
  }

  if (firstPkt.has<lp::CongestionMarkField>()) {
    nack.setTag(make_shared<lp::CongestionMarkTag>(firstPkt.get<lp::CongestionMarkField>()));
  }

  if (firstPkt.has<lp::NonDiscoveryField>()) {
    ++this->nInNetInvalid;
    NFD_LOG_FACE_WARN("received NonDiscovery with Nack: DROP");
    return;
  }

  if (firstPkt.has<lp::PrefixAnnouncementField>()) {
    ++this->nInNetInvalid;
    NFD_LOG_FACE_WARN("received PrefixAnnouncement with Nack: DROP");
    return;
  }

  this->receiveNack(nack, endpointId);
}
void
WebSocketTransport::receiveMessage(const std::string& msg)
{
  NFD_LOG_FACE_TRACE("Received: " << msg.size() << " bytes");

  bool isOk = false;
  Block element;
  std::tie(isOk, element) = Block::fromBuffer(reinterpret_cast<const uint8_t*>(msg.c_str()), msg.size());
  if (!isOk) {
    NFD_LOG_FACE_WARN("Failed to parse message payload");
    return;
  }

  this->receive(Transport::Packet(std::move(element)));
}
void
WebSocketTransport::processErrorCode(const websocketpp::lib::error_code& error)
{
  NFD_LOG_FACE_TRACE(__func__);

  if (getState() == TransportState::CLOSING ||
      getState() == TransportState::FAILED ||
      getState() == TransportState::CLOSED)
    // transport is shutting down, ignore any errors
    return;

  NFD_LOG_FACE_WARN("Send or ping operation failed: " << error.message());
  this->setState(TransportState::FAILED);
  doClose();
}
template<class T, class U>
DatagramTransport<T, U>::DatagramTransport(typename DatagramTransport::protocol::socket&& socket)
  : m_socket(std::move(socket))
  , m_hasRecentlyReceived(false)
{
  boost::asio::socket_base::send_buffer_size sendBufferSizeOption;
  boost::system::error_code error;
  m_socket.get_option(sendBufferSizeOption, error);
  if (error) {
    NFD_LOG_FACE_WARN("Failed to obtain send queue capacity from socket: " << error.message());
    this->setSendQueueCapacity(QUEUE_ERROR);
  }
  else {
    this->setSendQueueCapacity(sendBufferSizeOption.value());
  }

  m_socket.async_receive_from(boost::asio::buffer(m_receiveBuffer), m_sender,
                              [this] (auto&&... args) {
                                this->handleReceive(std::forward<decltype(args)>(args)...);
                              });
}
void
GenericLinkService::sendLpPacket(lp::Packet&& pkt, const EndpointId& endpointId)
{
  const ssize_t mtu = this->getTransport()->getMtu();

  if (m_options.reliabilityOptions.isEnabled) {
    m_reliability.piggyback(pkt, mtu);
  }

  if (m_options.allowCongestionMarking) {
    checkCongestionLevel(pkt);
  }

  auto block = pkt.wireEncode();
  if (mtu != MTU_UNLIMITED && block.size() > static_cast<size_t>(mtu)) {
    ++this->nOutOverMtu;
    NFD_LOG_FACE_WARN("attempted to send packet over MTU limit");
    return;
  }
  this->sendPacket(block, endpointId);
}
UnicastUdpTransport::UnicastUdpTransport(protocol::socket&& socket,
                                         ndn::nfd::FacePersistency persistency,
                                         time::nanoseconds idleTimeout)
  : DatagramTransport(std::move(socket))
  , m_idleTimeout(idleTimeout)
{
  this->setLocalUri(FaceUri(m_socket.local_endpoint()));
  this->setRemoteUri(FaceUri(m_socket.remote_endpoint()));
  this->setScope(ndn::nfd::FACE_SCOPE_NON_LOCAL);
  this->setPersistency(persistency);
  this->setLinkType(ndn::nfd::LINK_TYPE_POINT_TO_POINT);
  this->setMtu(udp::computeMtu(m_socket.local_endpoint()));

  NFD_LOG_FACE_INFO("Creating transport");

#ifdef __linux__
  //
  // By default, Linux does path MTU discovery on IPv4 sockets,
  // and sets the DF (Don't Fragment) flag on datagrams smaller
  // than the interface MTU. However this does not work for us,
  // because we cannot properly respond to ICMP "packet too big"
  // messages by fragmenting the packet at the application level,
  // since we want to rely on IP for fragmentation and reassembly.
  //
  // Therefore, we disable PMTU discovery, which prevents the kernel
  // from setting the DF flag on outgoing datagrams, and thus allows
  // routers along the path to perform fragmentation as needed.
  //
  const int value = IP_PMTUDISC_DONT;
  if (::setsockopt(m_socket.native_handle(), IPPROTO_IP,
                   IP_MTU_DISCOVER, &value, sizeof(value)) < 0) {
    NFD_LOG_FACE_WARN("Failed to disable path MTU discovery: " << std::strerror(errno));
  }
#endif

  if (getPersistency() == ndn::nfd::FACE_PERSISTENCY_ON_DEMAND &&
      m_idleTimeout > time::nanoseconds::zero()) {
    scheduleClosureWhenIdle();
  }
}
std::tuple<bool, Block, lp::Packet>
LpReassembler::receiveFragment(Transport::EndpointId remoteEndpoint, const lp::Packet& packet)
{
  BOOST_ASSERT(packet.has<lp::FragmentField>());

  static auto FALSE_RETURN = std::make_tuple(false, Block(), lp::Packet());

  // read and check FragIndex and FragCount
  uint64_t fragIndex = 0;
  uint64_t fragCount = 1;
  if (packet.has<lp::FragIndexField>()) {
    fragIndex = packet.get<lp::FragIndexField>();
  }
  if (packet.has<lp::FragCountField>()) {
    fragCount = packet.get<lp::FragCountField>();
  }

  if (fragIndex >= fragCount) {
    NFD_LOG_FACE_WARN("reassembly error, FragIndex>=FragCount: DROP");
    return FALSE_RETURN;
  }

  if (fragCount > m_options.nMaxFragments) {
    NFD_LOG_FACE_WARN("reassembly error, FragCount over limit: DROP");
    return FALSE_RETURN;
  }

  // check for fast path
  if (fragIndex == 0 && fragCount == 1) {
    ndn::Buffer::const_iterator fragBegin, fragEnd;
    std::tie(fragBegin, fragEnd) = packet.get<lp::FragmentField>();
    Block netPkt(&*fragBegin, std::distance(fragBegin, fragEnd));
    return std::make_tuple(true, netPkt, packet);
  }

  // check Sequence and compute message identifier
  if (!packet.has<lp::SequenceField>()) {
    NFD_LOG_FACE_WARN("reassembly error, Sequence missing: DROP");
    return FALSE_RETURN;
  }
  lp::Sequence messageIdentifier = packet.get<lp::SequenceField>() - fragIndex;
  Key key = std::make_tuple(remoteEndpoint, messageIdentifier);

  // add to PartialPacket
  PartialPacket& pp = m_partialPackets[key];
  if (pp.fragCount == 0) { // new PartialPacket
    pp.fragCount = fragCount;
    pp.nReceivedFragments = 0;
    pp.fragments.resize(fragCount);
  }
  else {
    if (fragCount != pp.fragCount) {
      NFD_LOG_FACE_WARN("reassembly error, FragCount changed: DROP");
      return FALSE_RETURN;
    }
  }

  if (pp.fragments[fragIndex].has<lp::SequenceField>()) {
    NFD_LOG_FACE_TRACE("fragment already received: DROP");
    return FALSE_RETURN;
  }

  pp.fragments[fragIndex] = packet;
  ++pp.nReceivedFragments;

  // check complete condition
  if (pp.nReceivedFragments == pp.fragCount) {
    Block reassembled = doReassembly(key);
    lp::Packet firstFrag(std::move(pp.fragments[0]));
    m_partialPackets.erase(key);
    return std::make_tuple(true, reassembled, firstFrag);
  }

  // set drop timer
  pp.dropTimer = scheduler::schedule(m_options.reassemblyTimeout, [=] { timeoutPartialPacket(key); });

  return FALSE_RETURN;
}
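To make the reassembly flow above concrete, the following is a minimal, test-style sketch (not part of NFD) that feeds two fragments of one network packet to an LpReassembler. Both fragments carry consecutive Sequence numbers, so Sequence - FragIndex yields the same message identifier and the second call returns the reassembled block. The helper makeFragment, the function reassemblyExample, the payload split, and the sequence numbers are illustrative assumptions; the lp::Packet field accessors follow ndn-cxx.

// Illustrative sketch only; assumes NFD/ndn-cxx headers and is modeled loosely
// on the LpReassembler unit tests. Names marked "hypothetical" do not exist upstream.
#include "face/lp-reassembler.hpp"

using namespace nfd;
using namespace nfd::face;

// Hypothetical helper: wrap the byte range [first, last) into an LpPacket fragment.
static lp::Packet
makeFragment(ndn::Buffer::const_iterator first, ndn::Buffer::const_iterator last,
             lp::Sequence seq, uint64_t fragIndex, uint64_t fragCount)
{
  lp::Packet pkt;
  pkt.add<lp::FragmentField>(std::make_pair(first, last));
  pkt.add<lp::SequenceField>(seq);
  pkt.add<lp::FragIndexField>(fragIndex);
  pkt.add<lp::FragCountField>(fragCount);
  return pkt;
}

// Hypothetical walkthrough: netPkt is a wire-encoded Interest or Data.
void
reassemblyExample(const ndn::Block& netPkt)
{
  ndn::Buffer payload(netPkt.begin(), netPkt.end());
  auto mid = payload.begin() + payload.size() / 2;

  LpReassembler::Options options; // default nMaxFragments and reassemblyTimeout
  LpReassembler reassembler(options);
  Transport::EndpointId endpoint = 0;

  bool isReassembled = false;
  Block out;
  lp::Packet firstFrag;

  // fragment 0, Sequence 1000 -> message identifier 1000 - 0 = 1000
  std::tie(isReassembled, out, firstFrag) =
      reassembler.receiveFragment(endpoint, makeFragment(payload.begin(), mid, 1000, 0, 2));
  // isReassembled == false: only 1 of 2 fragments buffered in the PartialPacket

  // fragment 1, Sequence 1001 -> message identifier 1001 - 1 = 1000 (same key)
  std::tie(isReassembled, out, firstFrag) =
      reassembler.receiveFragment(endpoint, makeFragment(mid, payload.end(), 1001, 1, 2));
  // isReassembled == true: `out` holds the reassembled network-layer packet,
  // and `firstFrag` carries the headers of fragment 0 for further decoding
}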