// Dissect a captured frame with PF_RING's parser and dump the human-readable
// summary to stdout. The const casts mirror the PF_RING API, which fills in
// h->extended_hdr.parsed_pkt in place.
void parse_packet_pf_ring(const struct pfring_pkthdr* h, const u_char* p, const u_char* user_bytes) {
  // Clear any stale parse results left in the header from a previous packet.
  memset((void*)&h->extended_hdr.parsed_pkt, 0, sizeof(h->extended_hdr.parsed_pkt));

  // Parse up to level 4, with timestamp (1) and without hash (0)
  // (argument order per pfring_parse_pkt: level, add_timestamp, add_hash).
  pfring_parse_pkt((u_char*)p, (struct pfring_pkthdr*)h, 4, 1, 0);

  char out[512];
  pfring_print_parsed_pkt(out, sizeof(out), p, h);
  std::cout << out;
}
/*
 * Per-packet demo callback: optionally prints a one-line summary of the packet
 * (when the global `verbose` flag is set), then updates the per-thread
 * packet/byte counters. `user_bytes` smuggles the thread id as an integer.
 */
void dummyProcesssPacket(const struct pfring_pkthdr *h, const u_char *p, const u_char *user_bytes) {
  const int BUFSIZE = 4096;
  char bigbuf[BUFSIZE]; /* scratch buffer the summary line is assembled into */
  int buflen = 0;       /* bytes written into bigbuf so far */
  long threadId = (long)user_bytes; /* thread id passed through the opaque user pointer */

  if(unlikely(do_shutdown)) return; /* global shutdown flag: drop the packet silently */

  if(verbose) {
    int s;
    uint nsec;

    if(h->ts.tv_sec == 0) {
      /* Header not yet parsed (no kernel timestamp): parse here at level 5,
       * with timestamp and hash (see pfring_parse_pkt arg order elsewhere). */
      memset((void*)&h->extended_hdr.parsed_pkt, 0, sizeof(struct pkt_parsing_info));
      pfring_parse_pkt((u_char*)p, (struct pfring_pkthdr*)h, 5, 1, 1);
    }

    /* Seconds-of-day in the local zone (thiszone is a file-level offset). */
    s = (h->ts.tv_sec + thiszone) % 86400;
    /* NOTE(review): this keeps only the last 3 decimal digits of timestamp_ns
     * and appends them after tv_usec — i.e. it assumes the sub-microsecond
     * part lives in timestamp_ns % 1000. Confirm against how timestamp_ns is
     * populated by the capture path. */
    nsec = h->extended_hdr.timestamp_ns % 1000;

    /* HH:MM:SS.uuuuuunnn prefix.
     * NOTE(review): snprintf returns the would-be length, so on truncation
     * buflen can exceed BUFSIZE and later `BUFSIZE - buflen` goes negative
     * (converted to a huge size_t). Harmless only while the summary stays
     * under 4 KB — worth hardening. */
    buflen += snprintf(&bigbuf[buflen], BUFSIZE - buflen, "%02d:%02d:%02d.%06u%03u ", s / 3600, (s % 3600) / 60, s % 60, (unsigned)h->ts.tv_usec, nsec);

#if 0
    /* Disabled: hex dump of the first 32 bytes of the frame. */
    for(i=0; i<32; i++) buflen += snprintf(&bigbuf[buflen], BUFSIZE - buflen, "%02X ", p[i]);
    printf("\n");
#endif

    buflen += snprintf(&bigbuf[buflen], BUFSIZE - buflen, "[T%lu]", threadId);

    if(use_extended_pkt_header) {
      /* PF_RING already parsed the packet: let it pretty-print the details. */
      buflen += pfring_print_parsed_pkt(&bigbuf[buflen], BUFSIZE - buflen, p, h);
    } else {
      /* Minimal fallback: peek at the Ethernet header ourselves. */
      struct ether_header *ehdr = (struct ether_header *) p;
      u_short eth_type = ntohs(ehdr->ether_type);

      if(eth_type == 0x0806) /* ARP ethertype */
        buflen += snprintf(&bigbuf[buflen], BUFSIZE - buflen, "[ARP]");
      else
        buflen += snprintf(&bigbuf[buflen], BUFSIZE - buflen, "[eth_type=0x%04X]", eth_type);

      buflen += snprintf(&bigbuf[buflen], BUFSIZE - buflen, "[caplen=%d][len=%d][parsed_header_len=%d]" "[eth_offset=%d][l3_offset=%d][l4_offset=%d][payload_offset=%d]\n", h->caplen, h->len, h->extended_hdr.parsed_header_len, h->extended_hdr.parsed_pkt.offset.eth_offset, h->extended_hdr.parsed_pkt.offset.l3_offset, h->extended_hdr.parsed_pkt.offset.l4_offset, h->extended_hdr.parsed_pkt.offset.payload_offset);
    }

    fputs(bigbuf, stdout); /* single write keeps lines intact across threads */
  }

  /* Account wire bytes, not captured bytes: add framing overhead to h->len. */
  numPkts[threadId]++, numBytes[threadId] += h->len+24 /* 8 Preamble + 4 CRC + 12 IFG */;
}
/*
 * Receive one packet and, if one arrived, run PF_RING's packet parser on it.
 *
 * Returns pfring_recv()'s result unchanged when it is <= 0 (error / nothing
 * read); otherwise returns the result of pfring_parse_pkt() on the buffer.
 */
int pfring_recv_parsed(pfring *ring, u_char** buffer, u_int buffer_len, struct pfring_pkthdr *hdr, u_int8_t wait_for_incoming_packet, u_int8_t level /* 1..4 */, u_int8_t add_timestamp, u_int8_t add_hash) {
  int recv_rc = pfring_recv(ring, buffer, buffer_len, hdr, wait_for_incoming_packet);

  if(recv_rc <= 0)
    return recv_rc; /* no packet (or error): propagate as-is */

  /* A packet was received: dissect it in place up to the requested level. */
  return pfring_parse_pkt(*buffer, hdr, level, add_timestamp, add_hash);
}
/*
 * DNA receive path: poll the zero-copy ring for the next packet.
 *
 * Returns  1 and fills *buffer/hdr when a packet was read,
 *          0 when no packet is available (and waiting was not requested or
 *            the wait yielded nothing),
 *         -1 when the ring is shutting down or the recv loop was broken.
 *
 * When the ring is marked reentrant the whole operation runs under the
 * ring's rx write lock; every return path releases it.
 */
int pfring_dna_recv(pfring *ring, u_char** buffer, u_int buffer_len, struct pfring_pkthdr *hdr, u_int8_t wait_for_incoming_packet) {
  u_char *pkt = NULL;
  int8_t status = 0;

  if(unlikely(ring->reentrant)) pthread_rwlock_wrlock(&ring->rx_lock);

redo_pfring_recv:
  /* Re-checked on every retry so a shutdown during a wait is honored. */
  if(ring->is_shutting_down || ring->break_recv_loop) {
    if(unlikely(ring->reentrant)) pthread_rwlock_unlock(&ring->rx_lock);
    return(-1);
  }

  pkt = ring->dna_next_packet(ring, buffer, buffer_len, hdr);

  if(pkt && (hdr->len > 0)) {
    /* buffer_len > 0 means the caller supplied a copy buffer; only then is
     * the packet parsed (level 4, with timestamp and hash). */
    if(buffer_len > 0) pfring_parse_pkt(*buffer, hdr, 4, 1, 1);

    hdr->extended_hdr.rx_direction = 1; /* mark as received (not transmitted) */

    if(unlikely(ring->reentrant)) pthread_rwlock_unlock(&ring->rx_lock);
    return(1);
  }

  if(wait_for_incoming_packet) {
    /* Block (driver-specific) until something may be readable, then retry. */
    status = ring->dna_check_packet_to_read(ring, wait_for_incoming_packet);

    if(status > 0) goto redo_pfring_recv;
  }

  if(unlikely(ring->reentrant)) pthread_rwlock_unlock(&ring->rx_lock);
  return(0);
}
/*
 * Endace DAG receive path: walk the DAG stream buffer one ERF record at a
 * time and deliver the next Ethernet payload.
 *
 * Returns  1 when a packet was delivered (copied into *buffer when
 *            buffer_len > 0, otherwise *buffer is pointed into the stream),
 *          0 when the loop was broken or no record is available and
 *            waiting was not requested,
 *         -1 on stream-advance failure, bad record size, or missing priv data.
 */
int pfring_dag_recv(pfring *ring, u_char** buffer, u_int buffer_len, struct pfring_pkthdr *hdr, u_int8_t wait_for_incoming_packet) {
  int caplen = 0;
  int skip;
  dag_record_t *erf_hdr;
  uint16_t rlen;              /* record length from the ERF header */
  u_char *payload;
  uint8_t *ext_hdr_type;
  uint32_t ext_hdr_num;       /* number of ERF extension headers present */
  uint32_t len;               /* wire length of the packet */
  unsigned long long ts;
  int retval = 0;
  pfring_dag *d;

#ifdef DAG_DEBUG
  printf("[PF_RING] DAG recv\n");
#endif

  if(ring->priv_data == NULL) return -1;

  d = (pfring_dag *) ring->priv_data;

  if(ring->reentrant) pthread_rwlock_wrlock(&ring->rx_lock);

check_and_poll:

  if (ring->break_recv_loop) goto exit; /* retval = 0 */

  /* Not even a full ERF header between bottom and top: advance the stream. */
  if ((d->top - d->bottom) < dag_record_size) {
    if ( (d->top = dag_advance_stream(d->fd, d->stream_num, (void * /* cast required by API; it is really a void** */) &d->bottom)) == NULL) {
      retval = -1;
      goto exit;
    }

    /* Still nothing and the caller does not want to block: give up. */
    if ( (d->top - d->bottom) < dag_record_size && !wait_for_incoming_packet ) goto exit; /* retval = 0 */

    goto check_and_poll;
  }

  erf_hdr = (dag_record_t *) d->bottom;
  rlen = ntohs(erf_hdr->rlen);

  if (rlen < dag_record_size) {
    fprintf(stderr, "Error: wrong record size\n");
    retval = -1;
    goto exit;
  }

  d->bottom += rlen; /* consume this record regardless of what it contains */

  skip = 0;
  switch((erf_hdr->type & 0x7f)) {
  case TYPE_PAD:
    skip = 1;
    /* fallthrough: padding records still carry the loss counter */
  case TYPE_ETH:
    /* Drop-statistics update from the ERF loss counter, saturating at UINT_MAX. */
    if (erf_hdr->lctr) {
      if (d->stats_drop > (UINT_MAX - ntohs(erf_hdr->lctr)))
        d->stats_drop = UINT_MAX;
      else
        d->stats_drop += ntohs(erf_hdr->lctr);
    }
    break;
    /* Note:
     * In TYPE_COLOR_HASH_ETH, TYPE_DSM_COLOR_ETH, TYPE_COLOR_ETH
     * the color value overwrites the lctr, so no stats update there. */
  default:
    break;
  }

  if (skip) goto check_and_poll; /* padding record: fetch the next one */

  payload = (u_char *) erf_hdr;
  payload += dag_record_size;

  /* Count ERF extension headers (high bit of the type byte marks "more"),
   * bounded by the record length, and step past them. */
  ext_hdr_type = &erf_hdr->type;
  ext_hdr_num = 0;
  while ( (*ext_hdr_type & 0x80) && (rlen > (16 + ext_hdr_num * 8)) ) {
    ext_hdr_type += 8;
    ext_hdr_num++;
  }
  payload += 8 * ext_hdr_num;

  switch((erf_hdr->type & 0x7f)) {
  case TYPE_COLOR_HASH_ETH:
  case TYPE_DSM_COLOR_ETH:
  case TYPE_COLOR_ETH:
    /* In the colored variants lctr carries the color/hash, not losses. */
    hdr->extended_hdr.pkt_hash /* 32bit */ = erf_hdr->lctr /* 16bit */;
    /* fallthrough: colored records are still Ethernet records */
  case TYPE_ETH:
    len = ntohs(erf_hdr->wlen);
    if (d->strip_crc) len -= 4;

    /* Captured length = record minus ERF header, extension headers and the
     * 2-byte Ethernet alignment pad; clamp to ring caplen, wire len and the
     * caller's buffer. */
    caplen = rlen;
    caplen -= dag_record_size;
    caplen -= (8 * ext_hdr_num);
    caplen -= 2;

    if (caplen > ring->caplen) caplen = ring->caplen;
    if (caplen > len) caplen = len;
    if((buffer_len > 0) && (caplen > buffer_len)) caplen = buffer_len;

    payload += 2; /* skip the 2-byte alignment pad before the Ethernet frame */
    break;
  default:
#ifdef DAG_DEBUG
    printf("Warning: unhandled ERF type\n");
#endif
    goto check_and_poll;
  }

  /* buffer_len > 0: caller supplied storage, copy into it.
   * buffer_len == 0: zero-copy, point the caller into the stream buffer. */
  if (buffer_len > 0){
    if(*buffer != NULL && caplen > 0)
      memcpy(*buffer, payload, caplen);
  } else
    *buffer = payload;

  hdr->caplen = caplen;
  hdr->len = len;

  /* ERF timestamp is 32.32 fixed point (seconds.fraction).
   * tv_usec: fraction * 1e6 with rounding (+0x80000000 is half an LSB),
   * then carry into tv_sec if the rounding overflowed a second. */
  ts = erf_hdr->ts;
  hdr->ts.tv_sec = ts >> 32;
  ts = (ts & 0xffffffffULL) * 1000000;
  ts += 0x80000000;
  hdr->ts.tv_usec = ts >> 32;
  if (hdr->ts.tv_usec >= 1000000) {
    hdr->ts.tv_usec -= 1000000;
    hdr->ts.tv_sec++;
  }

  /* Same conversion at nanosecond resolution for timestamp_ns. */
  ts = erf_hdr->ts;
  ts = (ts & 0xffffffffULL) * 1000000000;
  ts += 0x80000000;
  ts >>= 32;
  ts += ((erf_hdr->ts >> 32) * 1000000000);
  hdr->extended_hdr.timestamp_ns = ts;

#ifdef PFRING_DAG_PARSE_PKT
  pfring_parse_pkt(*buffer, hdr, 4, 0, 1);
#else
  hdr->extended_hdr.parsed_header_len = 0;
#endif

  d->stats_recv++;

  retval = 1;

exit:
  if(ring->reentrant) pthread_rwlock_unlock(&ring->rx_lock);

  return retval;
}
/*
 * DAQ acquire loop: round-robin over the configured PF_RING devices, hand
 * each packet to the analysis callback and act on its verdict.
 *
 * cnt == -1 means "run until breakloop"; otherwise process at most cnt
 * packets. Returns 0 on normal completion, DAQ_ERROR on poll failure.
 *
 * FIX: the original fell off the end of this non-void function after the
 * while loop (undefined return value on the normal-completion path); it now
 * returns 0 explicitly.
 */
static int pfring_daq_acquire(void *handle, int cnt, DAQ_Analysis_Func_t callback,
#if (DAQ_API_VERSION >= 0x00010002)
			      DAQ_Meta_Func_t metaback,
#endif
			      void *user) {
  Pfring_Context_t *context = (Pfring_Context_t *) handle;
  int ret = 0, i, current_ring_idx = context->num_devices - 1, rx_ring_idx;
  struct pollfd pfd[DAQ_PF_RING_MAX_NUM_DEVICES];
  hash_filtering_rule hash_rule;
  memset(&hash_rule, 0, sizeof(hash_rule));

  context->analysis_func = callback;
  context->breakloop = 0;

  for (i = 0; i < context->num_devices; i++)
    pfring_enable_ring(context->ring_handles[i]);

  while((!context->breakloop) && ((cnt == -1) || (cnt > 0))) {
    struct pfring_pkthdr phdr;
    DAQ_PktHdr_t hdr;
    DAQ_Verdict verdict;

    memset(&phdr, 0, sizeof(phdr));

    if(pfring_daq_reload_requested)
      pfring_daq_reload(context);

    /* Round-robin: try each device once, starting after the last one that
     * delivered a packet, so no device starves the others. */
    for (i = 0; i < context->num_devices; i++) {
      current_ring_idx = (current_ring_idx + 1) % context->num_devices;

      ret = pfring_recv(context->ring_handles[current_ring_idx],
			&context->pkt_buffer, 0, &phdr, 0 /* Don't wait */);

      if (ret > 0) break;
    }

    if(ret <= 0) {
      /* No packet on any device: poll all selectable fds until one is
       * readable or the configured timeout expires. */
      int rc;

      for (i = 0; i < context->num_devices; i++) {
	pfd[i].fd = pfring_get_selectable_fd(context->ring_handles[i]);
	pfd[i].events = POLLIN;
	pfd[i].revents = 0;
      }

      rc = poll(pfd, context->num_devices, context->timeout);

      if(rc < 0) {
	if(errno == EINTR) break; /* interrupted: leave the loop cleanly */

	DPE(context->errbuf, "%s: Poll failed: %s(%d)",
	    __FUNCTION__, strerror(errno), errno);
	return DAQ_ERROR;
      }
    } else {
      /* Translate the PF_RING header into the DAQ header for the callback. */
      hdr.caplen = phdr.caplen;
      hdr.pktlen = phdr.len;
      hdr.ts = phdr.ts;
#if (DAQ_API_VERSION >= 0x00010002)
      hdr.ingress_index = phdr.extended_hdr.if_index;
      hdr.egress_index = -1;
      hdr.ingress_group = -1;
      hdr.egress_group = -1;
#else
      hdr.device_index = phdr.extended_hdr.if_index;
#endif
      hdr.flags = 0;

      rx_ring_idx = current_ring_idx;

      context->stats.packets_received++;

      verdict = context->analysis_func(user, &hdr, (u_char*)context->pkt_buffer);

      if(verdict >= MAX_DAQ_VERDICT)
	verdict = DAQ_VERDICT_PASS;

      /* Never black-hole ARP, or the segment loses address resolution. */
      if (phdr.extended_hdr.parsed_pkt.eth_type == 0x0806 /* ARP */ )
	verdict = DAQ_VERDICT_PASS;

      switch(verdict) {
      case DAQ_VERDICT_BLACKLIST:
	/* Block the packet and block all future packets in the same flow systemwide. */
	if (context->use_kernel_filters) {
	  /* Build a 5-tuple kernel hash rule from the parsed packet. */
	  pfring_parse_pkt(context->pkt_buffer, &phdr, 4, 0, 0); /* or use pfring_recv_parsed() to force parsing. */

	  hash_rule.rule_id = context->filter_count++;
	  hash_rule.vlan_id = phdr.extended_hdr.parsed_pkt.vlan_id;
	  hash_rule.proto = phdr.extended_hdr.parsed_pkt.l3_proto;
	  memcpy(&hash_rule.host_peer_a, &phdr.extended_hdr.parsed_pkt.ipv4_src, sizeof(ip_addr));
	  memcpy(&hash_rule.host_peer_b, &phdr.extended_hdr.parsed_pkt.ipv4_dst, sizeof(ip_addr));
	  hash_rule.port_peer_a = phdr.extended_hdr.parsed_pkt.l4_src_port;
	  hash_rule.port_peer_b = phdr.extended_hdr.parsed_pkt.l4_dst_port;
	  hash_rule.plugin_action.plugin_id = NO_PLUGIN_ID;

	  if (context->mode == DAQ_MODE_PASSIVE && context->num_reflector_devices > rx_ring_idx) {
	    /* lowlevelbridge ON: reflect matching packets in kernel space. */
	    hash_rule.rule_action = reflect_packet_and_stop_rule_evaluation;
	    snprintf(hash_rule.reflector_device_name, REFLECTOR_NAME_LEN, "%s",
		     context->reflector_devices[rx_ring_idx]);
	  } else {
	    hash_rule.rule_action = dont_forward_packet_and_stop_rule_evaluation;
	  }

	  pfring_handle_hash_filtering_rule(context->ring_handles[rx_ring_idx],
					    &hash_rule, 1 /* add_rule */);

	  /* Purge rules idle (i.e. with no packet matching) for more than 1h */
	  pfring_purge_idle_hash_rules(context->ring_handles[rx_ring_idx],
				       context->idle_rules_timeout);
#if DEBUG
	  printf("[DEBUG] %d.%d.%d.%d:%d -> %d.%d.%d.%d:%d Verdict=%d Action=%d\n",
		 hash_rule.host_peer_a.v4 >> 24 & 0xFF, hash_rule.host_peer_a.v4 >> 16 & 0xFF,
		 hash_rule.host_peer_a.v4 >> 8 & 0xFF, hash_rule.host_peer_a.v4 >> 0 & 0xFF,
		 hash_rule.port_peer_a & 0xFFFF,
		 hash_rule.host_peer_b.v4 >> 24 & 0xFF, hash_rule.host_peer_b.v4 >> 16 & 0xFF,
		 hash_rule.host_peer_b.v4 >> 8 & 0xFF, hash_rule.host_peer_b.v4 >> 0 & 0xFF,
		 hash_rule.port_peer_b & 0xFFFF,
		 verdict, hash_rule.rule_action);
#endif
	}
	break;
      case DAQ_VERDICT_WHITELIST: /* Pass the packet and fastpath all future packets in the same flow systemwide. */
      case DAQ_VERDICT_IGNORE:    /* Pass the packet and fastpath all future packets in the same flow for this application. */
	/* Setting a rule for reflecting packets when lowlevelbridge is ON could be an optimization here,
	 * but we can't set "forward" (reflector won't work) or "reflect" (packets reflected twice) hash rules */
      case DAQ_VERDICT_PASS:      /* Pass the packet */
      case DAQ_VERDICT_REPLACE:   /* Pass a packet that has been modified in-place. (No resizing allowed!) */
	if (context->mode == DAQ_MODE_INLINE) {
	  /* Forward out the paired device (idx ^ 1). */
	  pfring_daq_send_packet(context, context->ring_handles[rx_ring_idx ^ 0x1],
				 hdr.caplen, context->ring_handles[rx_ring_idx],
				 context->ifindexes[rx_ring_idx ^ 0x1]);
	}
	break;
      case DAQ_VERDICT_BLOCK:     /* Block the packet. */
	/* Nothing to do really */
	break;
      case MAX_DAQ_VERDICT:
	/* No way we can reach this point */
	break;
      }

      context->stats.verdicts[verdict]++;

      if(cnt > 0) cnt--;
    }
  }

  return 0; /* normal completion (breakloop, EINTR, or cnt packets processed) */
}