static void enter_mode_rx_only_or_dump(struct mode *mode)
{
	int sock, irq, ifindex, fd = 0, ret;
	unsigned int size, it = 0;
	unsigned long fcnt = 0;
	short ifflags = 0;
	uint8_t *packet;
	struct ring rx_ring;
	struct pollfd rx_poll;
	struct frame_map *hdr;
	struct sock_fprog bpf_ops;

	if (!device_up_and_running(mode->device_in))
		panic("Device not up and running!\n");

	set_memcpy();
	sock = pf_socket();

	if (mode->dump) {
		struct stat tmp;

		memset(&tmp, 0, sizeof(tmp));
		ret = stat(mode->device_out, &tmp);
		if (ret < 0) {
			mode->dump_dir = 0;
			goto try_file;
		}

		mode->dump_dir = !!S_ISDIR(tmp.st_mode);
		if (mode->dump_dir) {
			fd = begin_multi_pcap_file(mode);
		} else {
try_file:
			fd = begin_single_pcap_file(mode);
		}
	}

	memset(&rx_ring, 0, sizeof(rx_ring));
	memset(&rx_poll, 0, sizeof(rx_poll));
	memset(&bpf_ops, 0, sizeof(bpf_ops));

	ifindex = device_ifindex(mode->device_in);
	size = ring_size(mode->device_in, mode->reserve_size);

	enable_kernel_bpf_jit_compiler();
	bpf_parse_rules(mode->filter, &bpf_ops);
	bpf_attach_to_sock(sock, &bpf_ops);

	setup_rx_ring_layout(sock, &rx_ring, size, mode->jumbo_support);
	create_rx_ring(sock, &rx_ring);
	mmap_rx_ring(sock, &rx_ring);
	alloc_rx_ring_frames(&rx_ring);
	bind_rx_ring(sock, &rx_ring, ifindex);

	prepare_polling(sock, &rx_poll);
	dissector_init_all(mode->print_mode);

	if (mode->cpu >= 0 && ifindex > 0) {
		irq = device_irq_number(mode->device_in);
		device_bind_irq_to_cpu(mode->cpu, irq);
		printf("IRQ: %s:%d > CPU%d\n", mode->device_in, irq, mode->cpu);
	}

	if (mode->promiscuous == true) {
		ifflags = enter_promiscuous_mode(mode->device_in);
		printf("PROMISC\n");
	}

	printf("BPF:\n");
	bpf_dump_all(&bpf_ops);
	printf("MD: RX %s\n\n", mode->dump ? pcap_ops[mode->pcap]->name : "");

	while (likely(sigint == 0)) {
		while (user_may_pull_from_rx(rx_ring.frames[it].iov_base)) {
			hdr = rx_ring.frames[it].iov_base;
			packet = ((uint8_t *) hdr) + hdr->tp_h.tp_mac;
			fcnt++;

			if (mode->packet_type != PACKET_ALL)
				if (mode->packet_type != hdr->s_ll.sll_pkttype)
					goto next;

			if (unlikely(rx_ring.layout.tp_frame_size <
				     hdr->tp_h.tp_snaplen)) {
				fprintf(stderr, "Skipping too large packet! "
					"No jumbo support selected?\n");
				fflush(stderr);
				goto next;
			}

			if (mode->dump) {
				struct pcap_pkthdr phdr;

				tpacket_hdr_to_pcap_pkthdr(&hdr->tp_h, &phdr);
				ret = pcap_ops[mode->pcap]->write_pcap_pkt(fd, &phdr,
									   packet, phdr.len);
				if (unlikely(ret != sizeof(phdr) + phdr.len))
					panic("Write error to pcap!\n");
			}

			show_frame_hdr(hdr, mode->print_mode, RING_MODE_INGRESS);
			dissector_entry_point(packet, hdr->tp_h.tp_snaplen,
					      mode->link_type);

			if (frame_cnt_max != 0 && fcnt >= frame_cnt_max) {
				sigint = 1;
				break;
			}
next:
			kernel_may_pull_from_rx(&hdr->tp_h);
			next_slot(&it, &rx_ring);

			if (unlikely(sigint == 1))
				break;

			if (mode->dump && next_dump) {
				struct tpacket_stats kstats;
				socklen_t slen = sizeof(kstats);

				memset(&kstats, 0, sizeof(kstats));
				getsockopt(sock, SOL_PACKET, PACKET_STATISTICS,
					   &kstats, &slen);

				fd = next_multi_pcap_file(mode, fd);
				next_dump = false;

				if (mode->print_mode == FNTTYPE_PRINT_NONE) {
					printf(".(+%u/-%u)",
					       kstats.tp_packets - kstats.tp_drops,
					       kstats.tp_drops);
					fflush(stdout);
				}
			}
		}

		poll(&rx_poll, 1, -1);
		poll_error_maybe_die(sock, &rx_poll);
	}

	if (!(mode->dump_dir && mode->print_mode == FNTTYPE_PRINT_NONE))
		sock_print_net_stats(sock);
	else {
		printf("\n\n");
		fflush(stdout);
	}

	dissector_cleanup_all();
	destroy_rx_ring(sock, &rx_ring);

	if (mode->promiscuous == true)
		leave_promiscuous_mode(mode->device_in, ifflags);

	close(sock);

	if (mode->dump) {
		if (mode->dump_dir)
			finish_multi_pcap_file(mode, fd);
		else
			finish_single_pcap_file(mode, fd);
	}
}
/*
 * If netsniff-ng's in-device is on a tap, it can efficiently filter out
 * some interesting packets and hand them to the out-device, e.g. for
 * testing or debugging.
 */
static void enter_mode_rx_to_tx(struct mode *mode)
{
	int rx_sock, ifindex_in, ifindex_out;
	unsigned int size_in, size_out, it_in = 0, it_out = 0;
	unsigned long fcnt = 0;
	uint8_t *in, *out;
	short ifflags = 0;
	struct frame_map *hdr_in, *hdr_out;
	struct ring tx_ring;
	struct ring rx_ring;
	struct pollfd rx_poll;
	struct sock_fprog bpf_ops;

	if (!strncmp(mode->device_in, mode->device_out, strlen(mode->device_in)))
		panic("Ingress/egress devices must be different!\n");
	if (!device_up_and_running(mode->device_out))
		panic("Egress device not up and running!\n");
	if (!device_up_and_running(mode->device_in))
		panic("Ingress device not up and running!\n");

	set_memcpy();
	rx_sock = pf_socket();
	tx_sock = pf_socket();

	memset(&tx_ring, 0, sizeof(tx_ring));
	memset(&rx_ring, 0, sizeof(rx_ring));
	memset(&rx_poll, 0, sizeof(rx_poll));
	memset(&bpf_ops, 0, sizeof(bpf_ops));

	ifindex_in = device_ifindex(mode->device_in);
	size_in = ring_size(mode->device_in, mode->reserve_size);

	ifindex_out = device_ifindex(mode->device_out);
	size_out = ring_size(mode->device_out, mode->reserve_size);

	enable_kernel_bpf_jit_compiler();
	bpf_parse_rules(mode->filter, &bpf_ops);
	bpf_attach_to_sock(rx_sock, &bpf_ops);

	setup_rx_ring_layout(rx_sock, &rx_ring, size_in, mode->jumbo_support);
	create_rx_ring(rx_sock, &rx_ring);
	mmap_rx_ring(rx_sock, &rx_ring);
	alloc_rx_ring_frames(&rx_ring);
	bind_rx_ring(rx_sock, &rx_ring, ifindex_in);
	prepare_polling(rx_sock, &rx_poll);

	set_packet_loss_discard(tx_sock);
	setup_tx_ring_layout(tx_sock, &tx_ring, size_out, mode->jumbo_support);
	create_tx_ring(tx_sock, &tx_ring);
	mmap_tx_ring(tx_sock, &tx_ring);
	alloc_tx_ring_frames(&tx_ring);
	bind_tx_ring(tx_sock, &tx_ring, ifindex_out);

	mt_init_by_seed_time();
	dissector_init_all(mode->print_mode);

	if (mode->promiscuous == true) {
		ifflags = enter_promiscuous_mode(mode->device_in);
		printf("PROMISC\n");
	}

	if (mode->kpull)
		interval = mode->kpull;

	itimer.it_interval.tv_sec = 0;
	itimer.it_interval.tv_usec = interval;
	itimer.it_value.tv_sec = 0;
	itimer.it_value.tv_usec = interval;
	setitimer(ITIMER_REAL, &itimer, NULL);

	printf("BPF:\n");
	bpf_dump_all(&bpf_ops);
	printf("MD: RXTX %luus\n\n", interval);
	printf("Running! Hang up with ^C!\n\n");

	while (likely(sigint == 0)) {
		while (user_may_pull_from_rx(rx_ring.frames[it_in].iov_base)) {
			hdr_in = rx_ring.frames[it_in].iov_base;
			in = ((uint8_t *) hdr_in) + hdr_in->tp_h.tp_mac;
			fcnt++;

			if (mode->packet_type != PACKET_ALL)
				if (mode->packet_type != hdr_in->s_ll.sll_pkttype)
					goto next;

			hdr_out = tx_ring.frames[it_out].iov_base;
			out = ((uint8_t *) hdr_out) + TPACKET_HDRLEN -
			      sizeof(struct sockaddr_ll);

			/* If we cannot pull, look for a different slot. */
			for (; !user_may_pull_from_tx(tx_ring.frames[it_out].iov_base) &&
			       likely(!sigint);) {
				if (mode->randomize)
					next_rnd_slot(&it_out, &tx_ring);
				else
					next_slot(&it_out, &tx_ring);

				hdr_out = tx_ring.frames[it_out].iov_base;
				out = ((uint8_t *) hdr_out) + TPACKET_HDRLEN -
				      sizeof(struct sockaddr_ll);
			}

			tpacket_hdr_clone(&hdr_out->tp_h, &hdr_in->tp_h);
			__memcpy(out, in, hdr_in->tp_h.tp_len);

			kernel_may_pull_from_tx(&hdr_out->tp_h);
			if (mode->randomize)
				next_rnd_slot(&it_out, &tx_ring);
			else
				next_slot(&it_out, &tx_ring);

			/* Should actually be avoided ... */
			show_frame_hdr(hdr_in, mode->print_mode, RING_MODE_INGRESS);
			dissector_entry_point(in, hdr_in->tp_h.tp_snaplen,
					      mode->link_type);

			if (frame_cnt_max != 0 && fcnt >= frame_cnt_max) {
				sigint = 1;
				break;
			}
next:
			kernel_may_pull_from_rx(&hdr_in->tp_h);
			next_slot(&it_in, &rx_ring);

			if (unlikely(sigint == 1))
				goto out;
		}

		poll(&rx_poll, 1, -1);
		poll_error_maybe_die(rx_sock, &rx_poll);
	}
out:
	sock_print_net_stats(rx_sock);

	dissector_cleanup_all();
	destroy_tx_ring(tx_sock, &tx_ring);
	destroy_rx_ring(rx_sock, &rx_ring);

	if (mode->promiscuous == true)
		leave_promiscuous_mode(mode->device_in, ifflags);

	close(tx_sock);
	close(rx_sock);
}
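
/*
 * Hedged sketch (assumptions, not the project's own helpers): the TX side of
 * the loop above claims a free slot, copies the frame in, and flags the slot
 * for transmission. user_may_pull_from_tx()/kernel_may_pull_from_tx() map
 * onto the TP_STATUS_AVAILABLE/TP_STATUS_SEND_REQUEST states from
 * <linux/if_packet.h>; shown here with a tpacket2_hdr, while the code above
 * uses the older TPACKET_HDRLEN layout.
 */
#include <linux/if_packet.h>

static inline int tx_slot_free_sketch(struct tpacket2_hdr *hdr)
{
	/* Slot is writable only once the kernel marks it available again. */
	return hdr->tp_status == TP_STATUS_AVAILABLE;
}

static inline void tx_slot_submit_sketch(struct tpacket2_hdr *hdr, unsigned int len)
{
	hdr->tp_len = len;				/* bytes to transmit from this slot */
	hdr->tp_status = TP_STATUS_SEND_REQUEST;	/* kernel may pull it now */
}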
static void recv_only_or_dump(struct ctx *ctx)
{
	short ifflags = 0;
	int sock, irq, ifindex, fd = 0, ret;
	unsigned int size, it = 0;
	struct ring rx_ring;
	struct pollfd rx_poll;
	struct sock_fprog bpf_ops;
	struct timeval start, end, diff;
	struct block_desc *pbd;
	unsigned long frame_count = 0;

	sock = pf_socket();

	if (ctx->rfraw) {
		ctx->device_trans = xstrdup(ctx->device_in);
		xfree(ctx->device_in);

		enter_rfmon_mac80211(ctx->device_trans, &ctx->device_in);
		ctx->link_type = LINKTYPE_IEEE802_11;
	}

	fmemset(&rx_ring, 0, sizeof(rx_ring));
	fmemset(&rx_poll, 0, sizeof(rx_poll));
	fmemset(&bpf_ops, 0, sizeof(bpf_ops));

	ifindex = device_ifindex(ctx->device_in);
	size = ring_size(ctx->device_in, ctx->reserve_size);

	enable_kernel_bpf_jit_compiler();
	bpf_parse_rules(ctx->filter, &bpf_ops, ctx->link_type);
	if (ctx->dump_bpf)
		bpf_dump_all(&bpf_ops);
	bpf_attach_to_sock(sock, &bpf_ops);

	ret = set_sockopt_hwtimestamp(sock, ctx->device_in);
	if (ret == 0 && ctx->verbose)
		printf("HW timestamping enabled\n");

	setup_rx_ring_layout(sock, &rx_ring, size, true, true);
	create_rx_ring(sock, &rx_ring, ctx->verbose);
	mmap_rx_ring(sock, &rx_ring);
	alloc_rx_ring_frames(sock, &rx_ring);
	bind_rx_ring(sock, &rx_ring, ifindex);

	prepare_polling(sock, &rx_poll);
	dissector_init_all(ctx->print_mode);

	if (ctx->cpu >= 0 && ifindex > 0) {
		irq = device_irq_number(ctx->device_in);
		device_set_irq_affinity(irq, ctx->cpu);

		if (ctx->verbose)
			printf("IRQ: %s:%d > CPU%d\n", ctx->device_in, irq, ctx->cpu);
	}

	if (ctx->promiscuous)
		ifflags = enter_promiscuous_mode(ctx->device_in);

	if (dump_to_pcap(ctx) && __pcap_io->init_once_pcap)
		__pcap_io->init_once_pcap();

	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	if (dump_to_pcap(ctx)) {
		__label__ try_file;
		struct stat stats;

		fmemset(&stats, 0, sizeof(stats));
		ret = stat(ctx->device_out, &stats);
		if (ret < 0) {
			ctx->dump_dir = 0;
			goto try_file;
		}

		ctx->dump_dir = S_ISDIR(stats.st_mode);
		if (ctx->dump_dir) {
			fd = begin_multi_pcap_file(ctx);
		} else {
try_file:
			fd = begin_single_pcap_file(ctx);
		}
	}

	printf("Running! Hang up with ^C!\n\n");
	fflush(stdout);

	bug_on(gettimeofday(&start, NULL));

	while (likely(sigint == 0)) {
		while (user_may_pull_from_rx_block((pbd = (void *) rx_ring.frames[it].iov_base))) {
			walk_t3_block(pbd, ctx, sock, &fd, &frame_count);

			kernel_may_pull_from_rx_block(pbd);
			it = (it + 1) % rx_ring.layout3.tp_block_nr;

			if (unlikely(sigint == 1))
				break;
		}

		poll(&rx_poll, 1, -1);
	}

	bug_on(gettimeofday(&end, NULL));
	timersub(&end, &start, &diff);

	if (!(ctx->dump_dir && ctx->print_mode == PRINT_NONE)) {
		sock_rx_net_stats(sock, frame_count);

		printf("\r%12lu sec, %lu usec in total\n", diff.tv_sec, diff.tv_usec);
	} else {
		printf("\n\n");
		fflush(stdout);
	}

	bpf_release(&bpf_ops);
	dissector_cleanup_all();
	destroy_rx_ring(sock, &rx_ring);

	if (ctx->promiscuous)
		leave_promiscuous_mode(ctx->device_in, ifflags);

	if (ctx->rfraw)
		leave_rfmon_mac80211(ctx->device_trans, ctx->device_in);

	if (dump_to_pcap(ctx)) {
		if (ctx->dump_dir)
			finish_multi_pcap_file(ctx, fd);
		else
			finish_single_pcap_file(ctx, fd);
	}

	close(sock);
}
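
/*
 * Hedged sketch (not the project's walk_t3_block() itself): how a TPACKET_V3
 * block is typically walked with the structures from <linux/if_packet.h>,
 * which is what the block-oriented loop above builds on. The per-packet
 * callback name below is hypothetical.
 */
#include <linux/if_packet.h>
#include <stdint.h>

static void walk_block_sketch(struct tpacket_block_desc *pbd,
			      void (*per_packet)(struct tpacket3_hdr *))
{
	unsigned int i, num_pkts = pbd->hdr.bh1.num_pkts;
	struct tpacket3_hdr *ppd;

	/* The first frame sits at offset_to_first_pkt from the block start. */
	ppd = (struct tpacket3_hdr *) ((uint8_t *) pbd +
				       pbd->hdr.bh1.offset_to_first_pkt);

	for (i = 0; i < num_pkts; ++i) {
		per_packet(ppd);

		/* tp_next_offset chains the frames within one block. */
		ppd = (struct tpacket3_hdr *) ((uint8_t *) ppd +
					       ppd->tp_next_offset);
	}
}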
static void receive_to_xmit(struct ctx *ctx)
{
	short ifflags = 0;
	uint8_t *in, *out;
	int rx_sock, ifindex_in, ifindex_out;
	unsigned int size_in, size_out, it_in = 0, it_out = 0;
	unsigned long frame_count = 0;
	struct frame_map *hdr_in, *hdr_out;
	struct ring tx_ring, rx_ring;
	struct pollfd rx_poll;
	struct sock_fprog bpf_ops;

	if (!strncmp(ctx->device_in, ctx->device_out, IFNAMSIZ))
		panic("Ingress/egress devices must be different!\n");
	if (!device_up_and_running(ctx->device_out))
		panic("Egress device not up and running!\n");

	rx_sock = pf_socket();
	tx_sock = pf_socket();

	fmemset(&tx_ring, 0, sizeof(tx_ring));
	fmemset(&rx_ring, 0, sizeof(rx_ring));
	fmemset(&rx_poll, 0, sizeof(rx_poll));
	fmemset(&bpf_ops, 0, sizeof(bpf_ops));

	ifindex_in = device_ifindex(ctx->device_in);
	ifindex_out = device_ifindex(ctx->device_out);

	size_in = ring_size(ctx->device_in, ctx->reserve_size);
	size_out = ring_size(ctx->device_out, ctx->reserve_size);

	enable_kernel_bpf_jit_compiler();
	bpf_parse_rules(ctx->filter, &bpf_ops, ctx->link_type);
	if (ctx->dump_bpf)
		bpf_dump_all(&bpf_ops);
	bpf_attach_to_sock(rx_sock, &bpf_ops);

	setup_rx_ring_layout(rx_sock, &rx_ring, size_in, ctx->jumbo, false);
	create_rx_ring(rx_sock, &rx_ring, ctx->verbose);
	mmap_rx_ring(rx_sock, &rx_ring);
	alloc_rx_ring_frames(rx_sock, &rx_ring);
	bind_rx_ring(rx_sock, &rx_ring, ifindex_in);
	prepare_polling(rx_sock, &rx_poll);

	set_packet_loss_discard(tx_sock);
	setup_tx_ring_layout(tx_sock, &tx_ring, size_out, ctx->jumbo);
	create_tx_ring(tx_sock, &tx_ring, ctx->verbose);
	mmap_tx_ring(tx_sock, &tx_ring);
	alloc_tx_ring_frames(tx_sock, &tx_ring);
	bind_tx_ring(tx_sock, &tx_ring, ifindex_out);

	dissector_init_all(ctx->print_mode);

	if (ctx->promiscuous)
		ifflags = enter_promiscuous_mode(ctx->device_in);

	if (ctx->kpull)
		interval = ctx->kpull;

	set_itimer_interval_value(&itimer, 0, interval);
	setitimer(ITIMER_REAL, &itimer, NULL);

	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	printf("Running! Hang up with ^C!\n\n");
	fflush(stdout);

	while (likely(sigint == 0)) {
		while (user_may_pull_from_rx(rx_ring.frames[it_in].iov_base)) {
			__label__ next;

			hdr_in = rx_ring.frames[it_in].iov_base;
			in = ((uint8_t *) hdr_in) + hdr_in->tp_h.tp_mac;
			frame_count++;

			if (ctx->packet_type != -1)
				if (ctx->packet_type != hdr_in->s_ll.sll_pkttype)
					goto next;

			hdr_out = tx_ring.frames[it_out].iov_base;
			out = ((uint8_t *) hdr_out) + TPACKET2_HDRLEN -
			      sizeof(struct sockaddr_ll);

			for (; !user_may_pull_from_tx(tx_ring.frames[it_out].iov_base) &&
			       likely(!sigint);) {
				if (ctx->randomize)
					next_rnd_slot(&it_out, &tx_ring);
				else {
					it_out++;
					if (it_out >= tx_ring.layout.tp_frame_nr)
						it_out = 0;
				}

				hdr_out = tx_ring.frames[it_out].iov_base;
				out = ((uint8_t *) hdr_out) + TPACKET2_HDRLEN -
				      sizeof(struct sockaddr_ll);
			}

			tpacket_hdr_clone(&hdr_out->tp_h, &hdr_in->tp_h);
			fmemcpy(out, in, hdr_in->tp_h.tp_len);

			kernel_may_pull_from_tx(&hdr_out->tp_h);
			if (ctx->randomize)
				next_rnd_slot(&it_out, &tx_ring);
			else {
				it_out++;
				if (it_out >= tx_ring.layout.tp_frame_nr)
					it_out = 0;
			}

			show_frame_hdr(hdr_in, ctx->print_mode);
			dissector_entry_point(in, hdr_in->tp_h.tp_snaplen,
					      ctx->link_type, ctx->print_mode);

			if (frame_count_max != 0) {
				if (frame_count >= frame_count_max) {
					sigint = 1;
					break;
				}
			}
next:
			kernel_may_pull_from_rx(&hdr_in->tp_h);

			it_in++;
			if (it_in >= rx_ring.layout.tp_frame_nr)
				it_in = 0;

			if (unlikely(sigint == 1))
				goto out;
		}

		poll(&rx_poll, 1, -1);
	}
out:
	timer_purge();

	sock_rx_net_stats(rx_sock, 0);

	bpf_release(&bpf_ops);
	dissector_cleanup_all();

	destroy_tx_ring(tx_sock, &tx_ring);
	destroy_rx_ring(rx_sock, &rx_ring);

	if (ctx->promiscuous)
		leave_promiscuous_mode(ctx->device_in, ifflags);

	close(tx_sock);
	close(rx_sock);
}
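
/*
 * Hedged sketch of why setitimer(ITIMER_REAL, ...) is armed above: a SIGALRM
 * handler (name and body assumed here, not taken from this section) typically
 * flushes the PACKET_TX_RING by kicking the kernel with a non-blocking
 * sendto(), so slots marked TP_STATUS_SEND_REQUEST are transmitted in batches
 * every `interval` microseconds. tx_sock is the global TX socket used above.
 */
#include <signal.h>
#include <sys/socket.h>

static void timer_elapsed_sketch(int sig __attribute__((unused)))
{
	/* Push all pending TX ring slots; errors are ignored in the sketch. */
	sendto(tx_sock, NULL, 0, MSG_DONTWAIT, NULL, 0);

	/* With it_interval set, the timer re-fires on its own. */
}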
static void enter_mode_rx_only_or_dump(struct mode *mode)
{
	int sock, irq, ifindex, fd = 0, ret;
	unsigned int size, it = 0;
	unsigned long fcnt = 0, skipped = 0;
	short ifflags = 0;
	uint8_t *packet;
	struct ring rx_ring;
	struct pollfd rx_poll;
	struct frame_map *hdr;
	struct sock_fprog bpf_ops;
	struct timeval start, end, diff;

	if (!device_up_and_running(mode->device_in))
		panic("Device not up and running!\n");

	sock = pf_socket();

	if (mode->rfraw) {
		mode->device_trans = xstrdup(mode->device_in);
		xfree(mode->device_in);

		enter_rfmon_mac80211(mode->device_trans, &mode->device_in);
		mode->link_type = LINKTYPE_IEEE802_11;
	}

	if (mode->dump) {
		struct stat tmp;

		fmemset(&tmp, 0, sizeof(tmp));
		ret = stat(mode->device_out, &tmp);
		if (ret < 0) {
			mode->dump_dir = 0;
			goto try_file;
		}

		mode->dump_dir = !!S_ISDIR(tmp.st_mode);
		if (mode->dump_dir) {
			fd = begin_multi_pcap_file(mode);
		} else {
try_file:
			fd = begin_single_pcap_file(mode);
		}
	}

	fmemset(&rx_ring, 0, sizeof(rx_ring));
	fmemset(&rx_poll, 0, sizeof(rx_poll));
	fmemset(&bpf_ops, 0, sizeof(bpf_ops));

	ifindex = device_ifindex(mode->device_in);
	size = ring_size(mode->device_in, mode->reserve_size);

	enable_kernel_bpf_jit_compiler();
	bpf_parse_rules(mode->filter, &bpf_ops);
	bpf_attach_to_sock(sock, &bpf_ops);

	set_sockopt_hwtimestamp(sock, mode->device_in);

	setup_rx_ring_layout(sock, &rx_ring, size, mode->jumbo_support);
	create_rx_ring(sock, &rx_ring);
	mmap_rx_ring(sock, &rx_ring);
	alloc_rx_ring_frames(&rx_ring);
	bind_rx_ring(sock, &rx_ring, ifindex);

	prepare_polling(sock, &rx_poll);
	dissector_init_all(mode->print_mode);

	if (mode->cpu >= 0 && ifindex > 0) {
		irq = device_irq_number(mode->device_in);
		device_bind_irq_to_cpu(mode->cpu, irq);
		printf("IRQ: %s:%d > CPU%d\n", mode->device_in, irq, mode->cpu);
	}

	if (mode->promiscuous == true) {
		ifflags = enter_promiscuous_mode(mode->device_in);
		printf("PROMISC\n");
	}

	printf("BPF:\n");
	bpf_dump_all(&bpf_ops);
	printf("MD: RX %s ", mode->dump ? pcap_ops[mode->pcap]->name : "");
	if (mode->rfraw)
		printf("802.11 raw via %s ", mode->device_in);
#ifdef _LARGEFILE64_SOURCE
	printf("lf64 ");
#endif
	ioprio_print();
	printf("\n");

	gettimeofday(&start, NULL);

	while (likely(sigint == 0)) {
		while (user_may_pull_from_rx(rx_ring.frames[it].iov_base)) {
			hdr = rx_ring.frames[it].iov_base;
			packet = ((uint8_t *) hdr) + hdr->tp_h.tp_mac;
			fcnt++;

			if (mode->packet_type != PACKET_ALL)
				if (mode->packet_type != hdr->s_ll.sll_pkttype)
					goto next;

			if (unlikely(ring_frame_size(&rx_ring) < hdr->tp_h.tp_snaplen)) {
				skipped++;
				goto next;
			}

			if (mode->dump) {
				struct pcap_pkthdr phdr;

				tpacket_hdr_to_pcap_pkthdr(&hdr->tp_h, &phdr);
				ret = pcap_ops[mode->pcap]->write_pcap_pkt(fd, &phdr,
									   packet, phdr.len);
				if (unlikely(ret != sizeof(phdr) + phdr.len))
					panic("Write error to pcap!\n");
			}

			show_frame_hdr(hdr, mode->print_mode, RING_MODE_INGRESS);
			dissector_entry_point(packet, hdr->tp_h.tp_snaplen,
					      mode->link_type, mode->print_mode);

			if (frame_cnt_max != 0 && fcnt >= frame_cnt_max) {
				sigint = 1;
				break;
			}
next:
			kernel_may_pull_from_rx(&hdr->tp_h);
			next_slot_prerd(&it, &rx_ring);

			if (unlikely(sigint == 1))
				break;

			if (mode->dump && next_dump) {
				struct tpacket_stats kstats;
				socklen_t slen = sizeof(kstats);

				fmemset(&kstats, 0, sizeof(kstats));
				getsockopt(sock, SOL_PACKET, PACKET_STATISTICS,
					   &kstats, &slen);

				fd = next_multi_pcap_file(mode, fd);
				next_dump = false;

				if (mode->print_mode == FNTTYPE_PRINT_NONE) {
					printf(".(+%lu/-%lu)",
					       1UL * kstats.tp_packets - kstats.tp_drops - skipped,
					       1UL * kstats.tp_drops + skipped);
					fflush(stdout);
				}
			}
		}

		poll(&rx_poll, 1, -1);
		poll_error_maybe_die(sock, &rx_poll);
	}

	gettimeofday(&end, NULL);
	diff = tv_subtract(end, start);

	if (!(mode->dump_dir && mode->print_mode == FNTTYPE_PRINT_NONE)) {
		sock_print_net_stats(sock, skipped);

		printf("\r%12lu sec, %lu usec in total\n", diff.tv_sec, diff.tv_usec);
	} else {
		printf("\n\n");
		fflush(stdout);
	}

	bpf_release(&bpf_ops);
	dissector_cleanup_all();
	destroy_rx_ring(sock, &rx_ring);

	if (mode->promiscuous == true)
		leave_promiscuous_mode(mode->device_in, ifflags);

	if (mode->rfraw)
		leave_rfmon_mac80211(mode->device_trans, mode->device_in);

	close(sock);

	if (mode->dump) {
		if (mode->dump_dir)
			finish_multi_pcap_file(mode, fd);
		else
			finish_single_pcap_file(mode, fd);
	}
}
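
/*
 * Hedged sketch of the tpacket-to-pcap header mapping that
 * tpacket_hdr_to_pcap_pkthdr() performs in the dump path above. The
 * pcap_pkthdr layout used here is the classic libpcap one (ts/caplen/len);
 * the project's own pcap structures may differ, so treat this as
 * illustrative only.
 */
#include <linux/if_packet.h>
#include <pcap/pcap.h>

static void tpacket_to_pcap_sketch(const struct tpacket_hdr *thdr,
				   struct pcap_pkthdr *phdr)
{
	phdr->ts.tv_sec = thdr->tp_sec;		/* capture timestamp, seconds */
	phdr->ts.tv_usec = thdr->tp_usec;	/* capture timestamp, microseconds */
	phdr->caplen = thdr->tp_snaplen;	/* bytes actually captured */
	phdr->len = thdr->tp_len;		/* original on-wire length */
}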
static void recv_only_or_dump(struct ctx *ctx)
{
	uint8_t *packet;
	short ifflags = 0;
	int sock, irq, ifindex, fd = 0, ret;
	unsigned int size, it = 0;
	unsigned long frame_count = 0, skipped = 0;
	struct ring rx_ring;
	struct pollfd rx_poll;
	struct frame_map *hdr;
	struct sock_fprog bpf_ops;
	struct timeval start, end, diff;
	pcap_pkthdr_t phdr;

	if (!device_up_and_running(ctx->device_in) && !ctx->rfraw)
		panic("Device not up and running!\n");

	sock = pf_socket();

	if (ctx->rfraw) {
		ctx->device_trans = xstrdup(ctx->device_in);
		xfree(ctx->device_in);

		enter_rfmon_mac80211(ctx->device_trans, &ctx->device_in);
		ctx->link_type = LINKTYPE_IEEE802_11;
	}

	fmemset(&rx_ring, 0, sizeof(rx_ring));
	fmemset(&rx_poll, 0, sizeof(rx_poll));
	fmemset(&bpf_ops, 0, sizeof(bpf_ops));

	ifindex = device_ifindex(ctx->device_in);
	size = ring_size(ctx->device_in, ctx->reserve_size);

	enable_kernel_bpf_jit_compiler();
	bpf_parse_rules(ctx->filter, &bpf_ops, ctx->link_type);
	if (ctx->dump_bpf)
		bpf_dump_all(&bpf_ops);
	bpf_attach_to_sock(sock, &bpf_ops);

	set_sockopt_hwtimestamp(sock, ctx->device_in);

	setup_rx_ring_layout(sock, &rx_ring, size, ctx->jumbo);
	create_rx_ring(sock, &rx_ring, ctx->verbose);
	mmap_rx_ring(sock, &rx_ring);
	alloc_rx_ring_frames(&rx_ring);
	bind_rx_ring(sock, &rx_ring, ifindex);

	prepare_polling(sock, &rx_poll);
	dissector_init_all(ctx->print_mode);

	if (ctx->cpu >= 0 && ifindex > 0) {
		irq = device_irq_number(ctx->device_in);
		device_bind_irq_to_cpu(irq, ctx->cpu);

		if (ctx->verbose)
			printf("IRQ: %s:%d > CPU%d\n", ctx->device_in, irq, ctx->cpu);
	}

	if (ctx->promiscuous)
		ifflags = enter_promiscuous_mode(ctx->device_in);

	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	if (dump_to_pcap(ctx)) {
		__label__ try_file;
		struct stat stats;

		fmemset(&stats, 0, sizeof(stats));
		ret = stat(ctx->device_out, &stats);
		if (ret < 0) {
			ctx->dump_dir = 0;
			goto try_file;
		}

		ctx->dump_dir = S_ISDIR(stats.st_mode);
		if (ctx->dump_dir) {
			fd = begin_multi_pcap_file(ctx);
		} else {
try_file:
			fd = begin_single_pcap_file(ctx);
		}
	}

	printf("Running! Hang up with ^C!\n\n");
	fflush(stdout);

	bug_on(gettimeofday(&start, NULL));

	while (likely(sigint == 0)) {
		while (user_may_pull_from_rx(rx_ring.frames[it].iov_base)) {
			__label__ next;

			hdr = rx_ring.frames[it].iov_base;
			packet = ((uint8_t *) hdr) + hdr->tp_h.tp_mac;
			frame_count++;

			if (ctx->packet_type != -1)
				if (ctx->packet_type != hdr->s_ll.sll_pkttype)
					goto next;

			if (unlikely(ring_frame_size(&rx_ring) < hdr->tp_h.tp_snaplen)) {
				skipped++;
				goto next;
			}

			if (dump_to_pcap(ctx)) {
				tpacket_hdr_to_pcap_pkthdr(&hdr->tp_h, &hdr->s_ll,
							   &phdr, ctx->magic);

				ret = __pcap_io->write_pcap(fd, &phdr, ctx->magic, packet,
							    pcap_get_length(&phdr, ctx->magic));
				if (unlikely(ret != pcap_get_total_length(&phdr, ctx->magic)))
					panic("Write error to pcap!\n");
			}

			show_frame_hdr(hdr, ctx->print_mode);
			dissector_entry_point(packet, hdr->tp_h.tp_snaplen,
					      ctx->link_type, ctx->print_mode);

			if (frame_count_max != 0) {
				if (frame_count >= frame_count_max) {
					sigint = 1;
					break;
				}
			}
next:
			kernel_may_pull_from_rx(&hdr->tp_h);

			it++;
			if (it >= rx_ring.layout.tp_frame_nr)
				it = 0;

			if (unlikely(sigint == 1))
				break;

			if (dump_to_pcap(ctx)) {
				if (ctx->dump_mode == DUMP_INTERVAL_SIZE) {
					interval += hdr->tp_h.tp_snaplen;
					if (interval > ctx->dump_interval) {
						next_dump = true;
						interval = 0;
					}
				}

				if (next_dump) {
					fd = next_multi_pcap_file(ctx, fd);
					next_dump = false;

					if (ctx->verbose)
						print_pcap_file_stats(sock, ctx, skipped);
				}
			}
		}

		poll(&rx_poll, 1, -1);
	}

	bug_on(gettimeofday(&end, NULL));
	timersub(&end, &start, &diff);

	if (!(ctx->dump_dir && ctx->print_mode == PRINT_NONE)) {
		sock_print_net_stats(sock, skipped);

		printf("\r%12lu sec, %lu usec in total\n", diff.tv_sec, diff.tv_usec);
	} else {
		printf("\n\n");
		fflush(stdout);
	}

	bpf_release(&bpf_ops);
	dissector_cleanup_all();
	destroy_rx_ring(sock, &rx_ring);

	if (ctx->promiscuous)
		leave_promiscuous_mode(ctx->device_in, ifflags);

	if (ctx->rfraw)
		leave_rfmon_mac80211(ctx->device_trans, ctx->device_in);

	if (dump_to_pcap(ctx)) {
		if (ctx->dump_dir)
			finish_multi_pcap_file(ctx, fd);
		else
			finish_single_pcap_file(ctx, fd);
	}

	close(sock);
}
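
/*
 * Hedged sketch (not the project's implementation): what
 * enter_promiscuous_mode()/leave_promiscuous_mode() and the saved `ifflags`
 * short amount to, using the standard SIOCGIFFLAGS/SIOCSIFFLAGS ioctls.
 * Error handling is omitted; `sock_for_ioctl` is assumed to be any open
 * AF_INET/SOCK_DGRAM socket.
 */
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static short promisc_enter_sketch(int sock_for_ioctl, const char *dev)
{
	struct ifreq ifr;
	short old_flags;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev, sizeof(ifr.ifr_name) - 1);

	ioctl(sock_for_ioctl, SIOCGIFFLAGS, &ifr);	/* read current flags */
	old_flags = ifr.ifr_flags;			/* remember for restore */

	ifr.ifr_flags |= IFF_PROMISC;			/* accept all frames */
	ioctl(sock_for_ioctl, SIOCSIFFLAGS, &ifr);

	return old_flags;				/* caller restores these on exit */
}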