// Process one UDP packet node. DNS traffic is stored immediately; all other
// UDP packets go through the flow cache. Ownership of NewNode is taken in
// every path: it is either inserted into the tree or freed before returning.
static inline void ProcessUDPFlow(FlowSource_t *fs, struct FlowNode *NewNode ) {
struct FlowNode	*cached;

	assert(NewNode->memflag == NODE_IN_USE);

	// DNS queries/answers are flushed right away - no need to cache them
	if ( NewNode->src_port == 53 || NewNode->dst_port == 53 ) {
		StorePcapFlow(fs, NewNode);
		Free_Node(NewNode);
		return;
	}

	// all remaining UDP traffic goes into the flow cache.
	// Insert_Node() returns NULL if NewNode was inserted as a new flow,
	// otherwise it returns the node already in the tree
	cached = Insert_Node(NewNode);
	if ( cached == NULL ) {
		// NewNode is now owned by the tree
		dbg_printf("New UDP flow: Packets: %u, Bytes: %u\n", NewNode->packets, NewNode->bytes);
		return;
	}

	assert(cached->memflag == NODE_IN_USE);

	// fold this packet into the flow already cached in the tree
	cached->packets++;
	cached->bytes += NewNode->bytes;
	cached->t_last = NewNode->t_last;
	dbg_printf("Existing UDP flow: Packets: %u, Bytes: %u\n", cached->packets, cached->bytes);

	Free_Node(NewNode);

} // End of ProcessUDPFlow
// Process one TCP packet node: insert it into the flow cache or merge it into
// an existing flow. Handles client/server/application latency bookkeeping and
// flushes the flow when a FIN/RST is seen. Takes ownership of NewNode: it is
// either linked into the tree, flushed and removed, or freed before returning.
static inline void ProcessTCPFlow(FlowSource_t *fs, struct FlowNode *NewNode ) {
struct FlowNode	*Node;

	assert(NewNode->memflag == NODE_IN_USE);

	Node = Insert_Node(NewNode);
	// Insert_Node() returns NULL if NewNode was inserted as a new flow,
	// otherwise it returns the already existing node
	if ( Node == NULL ) {
		// Insert as new
		dbg_printf("New TCP flow: Packets: %u, Bytes: %u\n", NewNode->packets, NewNode->bytes);

		// in case it's a FIN/RST only packet - immediately flush it
		if ( NewNode->fin == FIN_NODE ) {
			// flush node
			if ( StorePcapFlow(fs, NewNode) ) {
				Remove_Node(NewNode);
				// fix: Remove_Node() frees NewNode - the previous code fell
				// through and passed the dangling pointer to Link_RevNode()
				// (use after free)
				return;
			}
		}

		if ( !CacheCheck() ) {
			uint32_t NumFlows;
			LogError("Node cache exhausted! - Immediate flush - increase flow cache!!");
			NumFlows = Flush_FlowTree(fs);
			LogError("Flushed flows: %u", NumFlows);
			// NOTE(review): if Flush_FlowTree() also releases the node just
			// inserted above, the Link_RevNode() call below would touch freed
			// memory - confirm Flush_FlowTree() semantics
		}

		if ( Link_RevNode(NewNode)) {
			// if we could link this new node, it is the server answer
			// -> calculate server latency
			SetServer_latency(NewNode);
		}
		return;
	}

	assert(Node->memflag == NODE_IN_USE);

	// check for first client ACK for client latency
	if ( Node->latency.flag == 1 ) {
		SetClient_latency(Node, &(NewNode->t_first));
	} else if ( Node->latency.flag == 2 ) {
		SetApplication_latency(Node, &(NewNode->t_first));
	}

	// update existing flow
	Node->flags |= NewNode->flags;
	Node->packets++;
	Node->bytes += NewNode->bytes;
	Node->t_last = NewNode->t_last;
	dbg_printf("Existing TCP flow: Packets: %u, Bytes: %u\n", Node->packets, Node->bytes);

	if ( NewNode->fin == FIN_NODE) {
		// FIN/RST seen - flush the cached flow
		Node->fin = FIN_NODE;
		if ( StorePcapFlow(fs, Node) ) {
			Remove_Node(Node);
		}
	}

	// fix: NewNode was leaked on the FIN path above - the old code freed it
	// only in the non-FIN else branch. It is never inserted here, so it must
	// be freed unconditionally.
	Free_Node(NewNode);

} // End of ProcessTCPFlow
// Process one ICMP packet node. ICMP flows are never cached: the node is
// stored immediately and released. Takes ownership of NewNode.
static inline void ProcessICMPFlow(FlowSource_t *fs, struct FlowNode *NewNode ) {

	// flush straight to storage - no flow tree involvement
	StorePcapFlow(fs, NewNode);
	dbg_printf("Flush ICMP flow: Packets: %u, Bytes: %u\n", NewNode->packets, NewNode->bytes);

	// node is done - hand it back to the node cache
	Free_Node(NewNode);

} // End of ProcessICMPFlow
// Process a packet node of any protocol not handled elsewhere. Such flows are
// never cached: the node is stored immediately and released. Takes ownership
// of NewNode.
static inline void ProcessOtherFlow(FlowSource_t *fs, struct FlowNode *NewNode ) {

	// flush straight to storage - no flow tree involvement
	StorePcapFlow(fs, NewNode);
	dbg_printf("Flush Other flow: Proto: %u, Packets: %u, Bytes: %u\n",
		NewNode->proto, NewNode->packets, NewNode->bytes);

	// node is done - hand it back to the node cache
	Free_Node(NewNode);

} // End of ProcessOtherFlow
// Remove 'node' from the global flow tree and return it to the node cache.
// NumFlows tracks the tree population and is decremented accordingly.
// Caller must not touch 'node' after this call - it is freed here.
void Remove_Node(struct FlowNode *node) {

#ifdef DEVEL
	// debug builds only: guard against removing from an empty tree
	if ( NumFlows == 0 ) {
		LogError("Remove_Node() Fatal Tried to remove a Node from empty tree");
		return;
	}
#endif

	RB_REMOVE(FlowTree, FlowTree, node);
	Free_Node(node);
	NumFlows--;

} // End of Remove_Node
// Process one TCP packet node: insert it into the flow cache or merge it into
// an existing flow; flush the flow when a FIN/RST is seen. Takes ownership of
// NewNode: it is either linked into the tree, flushed and removed, or freed
// before returning.
// NOTE(review): this is a second definition of ProcessTCPFlow (a latency-aware
// variant exists as well) - confirm only one is compiled into the binary.
static inline void ProcessTCPFlow (FlowSource_t *fs, struct FlowNode *NewNode) {
struct FlowNode	*Node;

	assert (NewNode->memflag == NODE_IN_USE);

	Node = Insert_Node (NewNode);
	// if insert fails, the existing node is returned -> flow exists already
	if (Node == NULL) {
		dbg_printf ("New TCP flow: Packets: %u, Bytes: %u\n", NewNode->packets, NewNode->bytes);

		// in case it's a FIN/RST only packet - immediately flush it
		if (NewNode->fin == FIN_NODE) {
			// flush node
			if (StorePcapFlow (fs, NewNode)) {
				Remove_Node (NewNode);
				// NewNode is freed now - nothing below touches it, but
				// return early to keep that invariant obvious
				return;
			}
		}

		if (!CacheCheck()) {
			uint32_t NumFlows;
			LogError ("Node cache exhausted! - Immediate flush - increase flow cache!!");
			NumFlows = Flush_FlowTree (fs);
			LogError ("Flushed flows: %u", NumFlows);
		}
		return;
	}

	assert (Node->memflag == NODE_IN_USE);

	// update existing flow
	Node->flags |= NewNode->flags;
	Node->packets++;
	Node->bytes += NewNode->bytes;
	Node->t_last = NewNode->t_last;
	dbg_printf ("Existing TCP flow: Packets: %u, Bytes: %u\n", Node->packets, Node->bytes);

	if (NewNode->fin == FIN_NODE) {
		// FIN/RST seen - flush the cached flow
		Node->fin = FIN_NODE;
		if (StorePcapFlow (fs, Node)) {
			Remove_Node (Node);
		}
	}

	// fix: NewNode was leaked on the FIN path above - the old code freed it
	// only in the non-FIN else branch. It is never inserted here, so it must
	// be freed unconditionally.
	Free_Node (NewNode);

} // End of ProcessTCPFlow
/*
 * ProcessPacket() - decode one captured packet into a FlowNode and queue it.
 *
 * Walks the link layer (ethernet, VLAN stacking), decodes IPv4/IPv6 including
 * IPv4 defragmentation and IPIP/IPv6-in-IP/GRE tunnels, then decodes the
 * transport header (UDP/TCP/ICMP/ICMPv6) and pushes the node onto NodeList.
 * Updates pcap_dev->proc_stat counters. The allocated Node is either pushed
 * or freed on every path.
 *
 * Fixes applied in this revision:
 *  - ARP branch leaked the freshly allocated Node (returned without free)
 *  - the "skip ethertype" branch incremented proc_stat.skipped twice when
 *    the packet was dropped, and once even when the node was pushed as tunnel
 *  - the IPv4 "fragment not yet complete" path leaked the Node
 */
void ProcessPacket(NodeList_t *NodeList, pcap_dev_t *pcap_dev, const struct pcap_pkthdr *hdr, const u_char *data) {
struct FlowNode	*Node;
struct ip		*ip;
void			*payload, *defragmented;
uint32_t		size_ip, offset, data_len, payload_len, bytes;
uint16_t		version, ethertype, proto;
#ifdef DEVEL
char			s1[64];
char			s2[64];
#endif
static unsigned	pkg_cnt = 0;

	pkg_cnt++;
	dbg_printf("\nNext Packet: %u\n", pkg_cnt);
	pcap_dev->proc_stat.packets++;
	offset = pcap_dev->linkoffset;

	Node = New_Node();
	if ( !Node ) {
		pcap_dev->proc_stat.skipped++;
		LogError("Node allocation error - skip packet");
		return;
	}

	if ( pcap_dev->linktype == DLT_EN10MB ) {
		ethertype = data[12] << 0x08 | data[13];
		// values <= 1500 are an IEEE 802.3 length field, not an ethertype
		int IEEE802 = ethertype <= 1500;
		if ( IEEE802 ) {
			pcap_dev->proc_stat.skipped++;
			Free_Node(Node);
			return;
		}
		REDO_LINK:
		switch (ethertype) {
			case 0x800:		// IPv4
			case 0x86DD:	// IPv6
				break;
			case 0x8100: {	// VLAN - may be stacked (QinQ)
				do {
					vlan_hdr_t *vlan_hdr = (vlan_hdr_t *)(data + offset);	// offset points to end of link layer
					dbg_printf("VLAN ID: %u, type: 0x%x\n", ntohs(vlan_hdr->vlan_id), ntohs(vlan_hdr->type) );
					ethertype = ntohs(vlan_hdr->type);
					offset += 4;
				} while ( ethertype == 0x8100 );

				// redo ethertype evaluation
				goto REDO_LINK;
				} break;
			case 0x806:		// skip ARP
				// silently skip ARP
				pcap_dev->proc_stat.skipped++;
				// fix: Node was leaked here - returned without being freed
				Free_Node(Node);
				return;
				break;
			case 0x26:		// ?? multicast router termination ??
			case 0x4305:	// B.A.T.M.A.N. BATADV
			case 0x886f:	// MS NLB heartbeat
			case 0x88a2:	// ATA over ethernet
			case 0x88cc:	// CISCO LLDP
			case 0x9000:	// Loop
			case 0x880b:	// PPP - rfc 7042
				if ( Node->proto ) {
					// if it's an encap which we do not understand yet - push tunnel
					Push_Node(NodeList, Node);
				} else {
					// fix: skipped counter was incremented twice on this path
					pcap_dev->proc_stat.skipped++;
					dbg_printf("Skip Ethertype 0x%x", ethertype);
					Free_Node(Node);
				}
				return;
				break;
			default:
				pcap_dev->proc_stat.unknown++;
				LogError("Unsupported link type: 0x%x, packet: %u", ethertype, pkg_cnt);
				Free_Node(Node);
				return;
		}
	}

	if (hdr->caplen < offset) {
		pcap_dev->proc_stat.short_snap++;
		LogError("Short packet: %u/%u", hdr->caplen, offset);
		Free_Node(Node);
		return;
	}

	// both timestamps start at the capture time of this packet
	Node->t_first.tv_sec = hdr->ts.tv_sec;
	Node->t_first.tv_usec = hdr->ts.tv_usec;
	Node->t_last.tv_sec = hdr->ts.tv_sec;
	Node->t_last.tv_usec = hdr->ts.tv_usec;

	data	 = data + offset;
	data_len = hdr->caplen - offset;
	offset	 = 0;
	defragmented = NULL;

	// IP decoding - re-entered for tunneled inner IP headers
	REDO_IPPROTO:
	if ( defragmented ) {
		// data is sitting on a defragmented IPv4 packet memory region
		// REDO loop could result in a memory leak, if again IP is fragmented
		// XXX memory leak to be fixed
		LogError("Fragmentation memory leak triggered!");
	}

	ip = (struct ip *)(data + offset);	// offset points to end of link layer
	version = ip->ip_v;					// ip version

	if ( version == 6 ) {
		uint64_t *addr;
		struct ip6_hdr *ip6 = (struct ip6_hdr *) (data + offset);
		size_ip = sizeof(struct ip6_hdr);
		offset = size_ip;	// offset point to end of IP header

		if ( data_len < size_ip ) {
			LogError("Packet: %u Length error: data_len: %u < size IPV6: %u, captured: %u, hdr len: %u",
				pkg_cnt, data_len, size_ip, hdr->caplen, hdr->len);
			pcap_dev->proc_stat.short_snap++;
			Free_Node(Node);
			return;
		}

		// XXX Extension headers not processed
		proto = ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt;
		payload_len = bytes = ntohs(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);

		if (data_len < (payload_len + size_ip) ) {
			// capture len was limited - so adapt payload_len
			payload_len = data_len - size_ip;
		}

		dbg_printf("Packet IPv6, SRC %s, DST %s, ",
			inet_ntop(AF_INET6, &ip6->ip6_src, s1, sizeof(s1)),
			inet_ntop(AF_INET6, &ip6->ip6_dst, s2, sizeof(s2)));

		payload = (void *)ip + size_ip;

		// store addresses in host byte order
		addr = (uint64_t *)&ip6->ip6_src;
		Node->src_addr.v6[0] = ntohll(addr[0]);
		Node->src_addr.v6[1] = ntohll(addr[1]);
		addr = (uint64_t *)&ip6->ip6_dst;
		Node->dst_addr.v6[0] = ntohll(addr[0]);
		Node->dst_addr.v6[1] = ntohll(addr[1]);
		Node->version = AF_INET6;
	} else if ( version == 4 ) {
		uint16_t ip_off = ntohs(ip->ip_off);
		uint32_t frag_offset = (ip_off & IP_OFFMASK) << 3;
		size_ip = (ip->ip_hl << 2);
		offset = size_ip;	// offset point to end of IP header

		if ( data_len < size_ip ) {
			LogError("Packet: %u Length error: data_len: %u < size IPV4: %u, captured: %u, hdr len: %u",
				pkg_cnt, data_len, size_ip, hdr->caplen, hdr->len);
			pcap_dev->proc_stat.short_snap++;
			Free_Node(Node);
			return;
		}

		payload_len = ntohs(ip->ip_len);
		dbg_printf("size IP hader: %u, len: %u, %u\n", size_ip, ip->ip_len, payload_len);

		payload_len -= size_ip;	// ajust length compatibel IPv6
		bytes = payload_len;
		payload = (void *)ip + size_ip;
		proto = ip->ip_p;

		if (data_len < (payload_len + size_ip) ) {
			// capture len was limited - so adapt payload_len
			payload_len = data_len - size_ip;
			pcap_dev->proc_stat.short_snap++;
		}

		dbg_printf("Packet IPv4 SRC %s, DST %s, ",
			inet_ntop(AF_INET, &ip->ip_src, s1, sizeof(s1)),
			inet_ntop(AF_INET, &ip->ip_dst, s2, sizeof(s2)));

		// IPv4 defragmentation
		if ( (ip_off & IP_MF) || frag_offset ) {
			uint16_t ip_id = ntohs(ip->ip_id);
#ifdef DEVEL
			if ( frag_offset == 0 )
				printf("Fragmented packet: first segement: ip_off: %u, frag_offset: %u\n", ip_off, frag_offset);
			if (( ip_off & IP_MF ) && frag_offset )
				printf("Fragmented packet: middle segement: ip_off: %u, frag_offset: %u\n", ip_off, frag_offset);
			if (( ip_off & IP_MF ) == 0 )
				printf("Fragmented packet: last segement: ip_off: %u, frag_offset: %u\n", ip_off, frag_offset);
#endif
			// fragmented packet
			defragmented = IPFrag_tree_Update(ip->ip_src.s_addr, ip->ip_dst.s_addr, ip_id, &payload_len, ip_off, payload);
			if ( defragmented == NULL ) {
				// not yet complete
				dbg_printf("Fragmentation not yet completed\n");
				// fix: Node was leaked while waiting for further fragments
				Free_Node(Node);
				return;
			}
			dbg_printf("Fragmentation assembled\n");
			// packet defragmented - set payload to defragmented data
			payload = defragmented;
		}

		Node->src_addr.v6[0] = 0;
		Node->src_addr.v6[1] = 0;
		Node->src_addr.v4 = ntohl(ip->ip_src.s_addr);

		Node->dst_addr.v6[0] = 0;
		Node->dst_addr.v6[1] = 0;
		Node->dst_addr.v4 = ntohl(ip->ip_dst.s_addr);
		Node->version = AF_INET;
	} else {
		LogError("ProcessPacket() Unsupprted protocol version: %i", version);
		pcap_dev->proc_stat.unknown++;
		Free_Node(Node);
		return;
	}

	Node->packets = 1;
	Node->bytes = bytes;
	Node->proto = proto;
	dbg_printf("Payload: %u bytes, Full packet: %u bytes\n", payload_len, bytes);

	// TCP/UDP decoding
	switch (proto) {
		case IPPROTO_UDP: {
			struct udphdr *udp = (struct udphdr *)payload;
			uint16_t UDPlen = ntohs(udp->uh_ulen);
			if ( UDPlen < 8 ) {
				LogError("UDP payload legth error: %u bytes < 8\n", UDPlen);
				Free_Node(Node);
				break;
			}
			uint32_t size_udp_payload = ntohs(udp->uh_ulen) - 8;

			// length check only valid if the packet was not truncated
			if ( (bytes == payload_len ) && (payload_len - sizeof(struct udphdr)) != size_udp_payload ) {
				LogError("UDP payload legth error: Expected %u, have %u bytes\n", size_udp_payload, (payload_len - (unsigned)sizeof(struct udphdr)));
				Free_Node(Node);
				break;
			}
			payload = payload + sizeof(struct udphdr);
			payload_len -= sizeof(struct udphdr);
			dbg_printf("UDP: size: %u, SRC: %i, DST: %i\n", size_udp_payload, ntohs(udp->uh_sport), ntohs(udp->uh_dport));

			Node->flags = 0;
			Node->src_port = ntohs(udp->uh_sport);
			Node->dst_port = ntohs(udp->uh_dport);

			if ( hdr->caplen == hdr->len ) {
				// process payload of full packets
				if ( (bytes == payload_len) && (Node->src_port == 53 || Node->dst_port == 53) )
					content_decode_dns(Node, payload, payload_len);
			}
			Push_Node(NodeList, Node);
			} break;
		case IPPROTO_TCP: {
			struct tcphdr *tcp = (struct tcphdr *)payload;
			uint32_t size_tcp;
			size_tcp = tcp->th_off << 2;

			if ( payload_len < size_tcp ) {
				LogError("TCP header length error: len: %u < size TCP header: %u", payload_len, size_tcp);
				pcap_dev->proc_stat.short_snap++;
				Free_Node(Node);
				break;
			}

			payload = payload + size_tcp;
			payload_len -= size_tcp;
			dbg_printf("Size TCP header: %u, size TCP payload: %u ", size_tcp, payload_len);
			dbg_printf("src %i, DST %i, flags %i : ", ntohs(tcp->th_sport), ntohs(tcp->th_dport), tcp->th_flags);
#ifdef DEVEL
			if ( tcp->th_flags & TH_SYN )  printf("SYN ");
			if ( tcp->th_flags & TH_ACK )  printf("ACK ");
			if ( tcp->th_flags & TH_URG )  printf("URG ");
			if ( tcp->th_flags & TH_PUSH ) printf("PUSH ");
			if ( tcp->th_flags & TH_FIN )  printf("FIN ");
			if ( tcp->th_flags & TH_RST )  printf("RST ");
			printf("\n");
#endif
			Node->flags = tcp->th_flags;
			Node->src_port = ntohs(tcp->th_sport);
			Node->dst_port = ntohs(tcp->th_dport);
			Push_Node(NodeList, Node);
			} break;
		case IPPROTO_ICMP: {
			struct icmp *icmp = (struct icmp *)payload;
			// encode ICMP type/code in dst_port - netflow convention
			Node->dst_port = (icmp->icmp_type << 8 ) + icmp->icmp_code;
			dbg_printf("IPv%d ICMP proto: %u, type: %u, code: %u\n", version, ip->ip_p, icmp->icmp_type, icmp->icmp_code);
			Push_Node(NodeList, Node);
			} break;
		case IPPROTO_ICMPV6: {
			struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)payload;
			// encode ICMPv6 type/code in dst_port - netflow convention
			Node->dst_port = (icmp6->icmp6_type << 8 ) + icmp6->icmp6_code;
			dbg_printf("IPv%d ICMP proto: %u, type: %u, code: %u\n", version, ip->ip_p, icmp6->icmp6_type, icmp6->icmp6_code);
			Push_Node(NodeList, Node);
			} break;
		case IPPROTO_IPV6: {
			uint32_t size_inner_ip = sizeof(struct ip6_hdr);

			if ( payload_len < size_inner_ip ) {
				LogError("IPIPv6 tunnel header length error: len: %u < size inner IP: %u", payload_len, size_inner_ip);
				pcap_dev->proc_stat.short_snap++;
				if ( defragmented ) {
					free(defragmented);
					defragmented = NULL;
				}
				Free_Node(Node);
				return;
			}
			offset = 0;
			data = payload;
			data_len = payload_len;

			// move IP to tun IP
			Node->tun_src_addr = Node->src_addr;
			Node->tun_dst_addr = Node->dst_addr;
			// NOTE(review): tun_proto is IPPROTO_IPIP here even though this is
			// the v6-in-IP tunnel case - confirm this is intended
			Node->tun_proto = IPPROTO_IPIP;

			dbg_printf("IPIPv6 tunnel - inner IPv6:\n");

			// redo proto evaluation
			goto REDO_IPPROTO;
			} break;
		case IPPROTO_IPIP: {
			struct ip *inner_ip = (struct ip *)payload;
			uint32_t size_inner_ip = (inner_ip->ip_hl << 2);

			if ( payload_len < size_inner_ip ) {
				LogError("IPIP tunnel header length error: len: %u < size inner IP: %u", payload_len, size_inner_ip);
				pcap_dev->proc_stat.short_snap++;
				Free_Node(Node);
				break;
			}
			offset = 0;
			data = payload;
			data_len = payload_len;

			// move IP to tun IP
			Node->tun_src_addr = Node->src_addr;
			Node->tun_dst_addr = Node->dst_addr;
			Node->tun_proto = IPPROTO_IPIP;

			dbg_printf("IPIP tunnel - inner IP:\n");

			// redo proto evaluation
			goto REDO_IPPROTO;
			} break;
		case IPPROTO_GRE: {
			gre_hdr_t *gre_hdr = (gre_hdr_t *)payload;
			uint32_t gre_hdr_size = sizeof(gre_hdr_t); // offset points to end of inner IP

			if ( payload_len < gre_hdr_size ) {
				LogError("GRE tunnel header length error: len: %u < size GRE hdr: %u", payload_len, gre_hdr_size);
				pcap_dev->proc_stat.short_snap++;
				Free_Node(Node);
				break;
			}

			dbg_printf("GRE proto encapsulation: type: 0x%x\n", ethertype);
			ethertype = ntohs(gre_hdr->type);
			offset = gre_hdr_size;
			data = payload;
			data_len = payload_len;

			// move IP to tun IP
			Node->tun_src_addr = Node->src_addr;
			Node->tun_dst_addr = Node->dst_addr;
			Node->tun_proto = IPPROTO_GRE;

			// redo IP proto evaluation
			goto REDO_LINK;
			} break;
		default:
			// not handled protocol - simply save node
			Push_Node(NodeList, Node);
			pcap_dev->proc_stat.unknown++;
			break;
	}

	if ( defragmented ) {
		free(defragmented);
		defragmented = NULL;
		dbg_printf("Defragmented buffer freed for proto %u", proto);
	}

} // End of ProcessPacket