Example #1
void *packet_consumer_thread(void *_i) {
  struct dir_info *i = (struct dir_info *) _i;
  int tx_queue_not_empty = 0;

  if (i->bind_core >= 0)
    bind2core(i->bind_core);

  while(!do_shutdown) {

    if(pfring_zc_recv_pkt(i->inzq, &i->tmpbuff, 0 /* wait_for_packet */) > 0) {

      if (unlikely(verbose)) {
#if 1
        char bigbuf[4096];
        pfring_print_pkt(bigbuf, sizeof(bigbuf), pfring_zc_pkt_buff_data(i->tmpbuff, i->inzq), i->tmpbuff->len, i->tmpbuff->len);
        fputs(bigbuf, stdout);
#else
        u_char *pkt_data = pfring_zc_pkt_buff_data(i->tmpbuff, i->inzq);
        int j;
        for(j = 0; j < i->tmpbuff->len; j++)
          printf("%02X ", pkt_data[j]);
        printf("\n");
#endif
      }

      i->numPkts++;
      i->numBytes += i->tmpbuff->len + 24; /* 8 Preamble + 4 CRC + 12 IFG */
      
      errno = 0;
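      /* retry the send until it succeeds; give up only on EMSGSIZE (packet too large) or shutdown */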
      while (unlikely(pfring_zc_send_pkt(i->outzq, &i->tmpbuff, flush_packet) < 0 && errno != EMSGSIZE && !do_shutdown))
        if (wait_for_packet) usleep(1);

      tx_queue_not_empty = 1;
    } else {
      if (tx_queue_not_empty) {
        pfring_zc_sync_queue(i->outzq, tx_only);
        tx_queue_not_empty = 0;
      }
      if (wait_for_packet) 
        usleep(1);
    }

  }

  if (!flush_packet) pfring_zc_sync_queue(i->outzq, tx_only);
  pfring_zc_sync_queue(i->inzq, rx_only);

  return NULL;
}
Example #2
static int pfring_zc_daq_acquire_best_effort(void *handle, int cnt, DAQ_Analysis_Func_t callback,
#if (DAQ_API_VERSION >= 0x00010002)
        DAQ_Meta_Func_t metaback,
#endif
        void *user) {
    Pfring_Context_t *context = (Pfring_Context_t *) handle;
    int ret = 0, c = 0;
    u_int32_t rx_ring_idx = context->num_devices - 1, rx_ring_idx_clone;
    u_int32_t mask = 0;
    u_char *pkt_buffer;

    context->analysis_func = callback;
    context->breakloop = 0;

    while (!context->breakloop && (cnt <= 0 || c < cnt)) {

#ifdef SIG_RELOAD
        if (pfring_zc_daq_reload_requested)
            pfring_zc_daq_reload(context);
#endif

        while (pfring_zc_daq_in_packets(context, &rx_ring_idx) && !context->breakloop) {
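            /* read the waiting packet and tag it with its ingress ifindex (stored in the hash field) */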

            pfring_zc_recv_pkt(context->rx_queues[rx_ring_idx], &context->buffer, 0);

            context->buffer->hash = context->ifindexes[rx_ring_idx];

            pkt_buffer = pfring_zc_pkt_buff_data(context->buffer, context->rx_queues[rx_ring_idx]);

#ifdef ENABLE_BPF
            if (!context->bpf_filter || bpf_filter(context->filter.bf_insns, pkt_buffer, context->buffer->len /* caplen */, context->buffer->len) != 0) { /* accept */
#endif
                /* enqueue the packet (and don't care if there is no room available) */
                mask = 1 << (context->num_devices);
#ifdef ENABLE_BPF
            } else {
                context->stats.packets_received++;
                context->stats.verdicts[DAQ_VERDICT_PASS]++;
            }
#endif
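            /* always forward to the paired (bridged) interface: rx_ring_idx ^ 0x1 */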
            mask |= 1 << (rx_ring_idx ^ 0x1);
            pfring_zc_send_pkt_multi(context->mq, &context->buffer, mask, 0);
        }

        rx_ring_idx_clone = rx_ring_idx;
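        /* while no new packets arrive on the RX rings, process packets queued for analysis */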
        while (!(ret = pfring_zc_daq_in_packets(context, &rx_ring_idx_clone)) && !pfring_zc_queue_is_empty(context->q) && !context->breakloop) {
            ret = pfring_zc_recv_pkt(context->q, &context->buffer, 0);
            pfring_zc_daq_process(context, context->buffer, user);
            c++;
        }

        if (!ret) {
            if (usleep(1) == -1)
                if (errno == EINTR)
                    break;
        }
    }

    return 0;
}
Example #3
int32_t sysdig_distribution_func(pfring_zc_pkt_buff *pkt_handle, pfring_zc_queue *in_queue, void *user) {
  /* NOTE: pkt_handle->hash contains the CPU id */
  struct sysdig_event_header *ev = (struct sysdig_event_header*)pfring_zc_pkt_buff_data(pkt_handle, in_queue); 
  long num_out_queues = (long) user;

  return(ev->thread_id % num_out_queues);
}
Example #4
void* consumer_thread(void* _id) {
  long id = (long) _id;

  pfring_zc_pkt_buff *b = buffers[id];

  bind2core(bind_core[id]);

  while(!consumers_stats[id].do_shutdown) {

    if(pfring_zc_recv_pkt(outzq[id], &b, wait_for_packet) > 0) {

#if 0
      int i;

      for(i = 0; i < b->len; i++)
        printf("%02X ", pfring_zc_pkt_buff_data(b, outzq[id])[i]);
      printf("\n");
#endif

      consumers_stats[id].numPkts++;
      consumers_stats[id].numBytes += b->len + 24; /* 8 Preamble + 4 CRC + 12 IFG */
    }

  }

  pfring_zc_sync_queue(outzq[id], rx_only);

  return NULL;
}
Example #5
static inline void pfring_zc_daq_process(Pfring_Context_t *context, pfring_zc_pkt_buff *buffer, void *user) {
    DAQ_PktHdr_t hdr;
    DAQ_Verdict verdict;
    u_char *pkt_buffer;

    hdr.pktlen = hdr.caplen = buffer->len;
    hdr.ts.tv_sec = buffer->ts.tv_sec;
    hdr.ts.tv_usec = buffer->ts.tv_nsec/1000;
#if (DAQ_API_VERSION >= 0x00010002)
    hdr.ingress_index = buffer->hash;
    hdr.egress_index = -1;
    hdr.ingress_group = -1;
    hdr.egress_group = -1;
#else
    hdr.device_index = buffer->hash;
#endif
    hdr.flags = 0;

    pkt_buffer = pfring_zc_pkt_buff_data(buffer, context->q);

    verdict = context->analysis_func(user, &hdr, pkt_buffer);

    if (verdict >= MAX_DAQ_VERDICT)
        verdict = DAQ_VERDICT_PASS;

    switch(verdict) {
    case DAQ_VERDICT_BLACKLIST: /* Block the packet and block all future packets in the same flow systemwide. */
        /* TODO handle hw filters */
        break;
    case DAQ_VERDICT_WHITELIST: /* Pass the packet and fastpath all future packets in the same flow systemwide. */
    case DAQ_VERDICT_IGNORE:    /* Pass the packet and fastpath all future packets in the same flow for this application. */
    case DAQ_VERDICT_PASS:      /* Pass the packet */
    case DAQ_VERDICT_REPLACE:   /* Pass a packet that has been modified in-place. (No resizing allowed!) */
    case DAQ_VERDICT_BLOCK:     /* Block the packet. */
        /* Nothing to do really */
        break;
    case MAX_DAQ_VERDICT:
        /* No way we can reach this point */
        break;
    }

    context->stats.packets_received++;
    context->stats.verdicts[verdict]++;
}
Example #6
void recv_packets()
{
	int ret;
	// Poll for packets
	do {
		ret = pfring_zc_recv_pkt(pf_recv, &pf_buffer, 0);
		if (ret == 0) {
			usleep(1000);
		}
	} while (ret == 0);
	// On any other error, just log it and return without a packet
	if (ret != 1) {
		log_error("recv", "Error: %d", ret);
		return;
	}
	// Successfully got a packet, now handle it
	uint8_t* pkt_buf = pfring_zc_pkt_buff_data(pf_buffer, pf_recv);
	handle_packet(pf_buffer->len, pkt_buf);
}
Example #7
void *packet_consumer_thread(void *_id) {
  struct volatile_globals *g = globals;

  bind2core(bind_core);

  while(!g->do_shutdown) {

    if(pfring_zc_recv_pkt(zq, &buffer, g->wait_for_packet) > 0) {

      if (unlikely(g->verbose)) {
        u_char *pkt_data = pfring_zc_pkt_buff_data(buffer, zq);

        if (buffer->ts.tv_nsec)
          printf("[%u.%u] ", buffer->ts.tv_sec, buffer->ts.tv_nsec);

        if(g->dump_as_sysdig_event) {
          struct sysdig_event_header *ev = (struct sysdig_event_header*)pkt_data;

          printf("[cpu_id=%u][tid=%lu][%u|%s]",
                 buffer->hash, ev->thread_id,
                 ev->event_type, sysdig_event2name(ev->event_type));
        } else {
          int i;

          for(i = 0; i < buffer->len; i++)
            printf("%02X ", pkt_data[i]);
        }

        printf("\n");
      }

      g->numPkts++;
      g->numBytes += buffer->len + 24; /* 8 Preamble + 4 CRC + 12 IFG */
    }
  }

  pfring_zc_sync_queue(zq, rx_only);

  return NULL;
}
Example #8
static int pfring_zc_daq_inject(void *handle, const DAQ_PktHdr_t *hdr,
                                const uint8_t *packet_data, uint32_t len, int reverse) {
    Pfring_Context_t *context = (Pfring_Context_t *) handle;
    int i, tx_ring_idx = DAQ_PF_RING_PASSIVE_DEV_IDX;

    if (!(context->mode == DAQ_MODE_INLINE || (context->mode == DAQ_MODE_PASSIVE && context->ids_bridge)))
        return DAQ_ERROR;

    for (i = 0; i < context->num_devices; i++) {
        if (context->ifindexes[i] ==
#if (DAQ_API_VERSION >= 0x00010002)
                hdr->ingress_index
#else
                hdr->device_index
#endif
           ) {
            tx_ring_idx = i ^ 0x1; /* TODO Check this (do we have to send to i or i ^ 0x1?) */
            break;
        }
    }

    memcpy(
        pfring_zc_pkt_buff_data(context->buffer_inject, context->rx_queues[tx_ring_idx]),
        packet_data,
        len
    );

    if (pfring_zc_send_pkt(context->tx_queues[tx_ring_idx],
                           &context->buffer_inject, 1 /* flush packet */) < 0) {
        DPE(context->errbuf, "%s", "pfring_zc_send_pkt() error");
        return DAQ_ERROR;
    }

    context->stats.packets_injected++;
    return DAQ_SUCCESS;
}
Example #9
void *packet_consumer_thread(void *data) {

  if (bind_core >= 0)
    bind2core(bind_core);

  while(!do_shutdown) {

    if(pfring_zc_recv_pkt(inzq, &tmpbuff, wait_for_packet) > 0) {

      if (unlikely(verbose)) {
        char bigbuf[4096];
        pfring_print_pkt(bigbuf, sizeof(bigbuf), pfring_zc_pkt_buff_data(tmpbuff, inzq), tmpbuff->len, tmpbuff->len);
        fputs(bigbuf, stdout);
      }
#if 0
      int i;
      u_char *pkt_data = pfring_zc_pkt_buff_data(tmpbuff, inzq);
      for(i = 0; i < tmpbuff->len; i++)
        printf("%02X ", pkt_data[i]);
      printf("\n");
#endif

      numPkts++;
      numBytes += tmpbuff->len + 24; /* 8 Preamble + 4 CRC + 12 IFG */

      while (unlikely(pfring_zc_send_pkt(outzq, &tmpbuff, flush_packet) < 0 && !do_shutdown))
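        /* send failed (e.g. TX queue full): back off briefly and retry until sent or shutdown */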
        usleep(1);
    }

  }

  if (!flush_packet) pfring_zc_sync_queue(outzq, tx_only);
  pfring_zc_sync_queue(inzq, rx_only);

  return NULL;
}
Example #10
void *send_traffic(void *user) {
  ticks hz, tick_start = 0, tick_delta = 0;
  u_int64_t ts_ns_start = 0, ns_delta = 0;
  u_int32_t buffer_id = 0;
  int sent_bytes;
#ifdef BURST_API
  int i, sent_packets;
#endif

  if (bind_core >= 0)
    bind2core(bind_core);

  if(pps > 0) {
    if (use_pulse_time) {
      ts_ns_start = *pulse_timestamp_ns;
      ns_delta = (double) (1000000000 / pps);
    } else {
      /* computing the usleep(1) overhead in ticks */
      tick_start = getticks();
      usleep(1);
      tick_delta = getticks() - tick_start;

      /* computing the CPU frequency (ticks per second) */
      tick_start = getticks();
      usleep(1001);
      hz = (getticks() - tick_start - tick_delta) * 1000 /*kHz -> Hz*/;
      printf("Estimated CPU freq: %lu Hz\n", (long unsigned int) hz);

      tick_delta = (double) (hz / pps);
      tick_start = getticks();
    }
  }

#ifdef BURST_API  
  /****** Burst API ******/
  if (use_pkt_burst_api) {
  while (likely(!do_shutdown && (!num_to_send || numPkts < num_to_send))) {

    if (!num_queue_buffers || numPkts < num_queue_buffers + NBUFF || num_ips > 1) { /* forge each buffer only on the first pass (always forge when num_ips > 1) */
      for (i = 0; i < BURSTLEN; i++) {
        buffers[buffer_id + i]->len = packet_len;
        if (stdin_packet_len > 0)
          memcpy(pfring_zc_pkt_buff_data(buffers[buffer_id + i], zq), stdin_packet, stdin_packet_len);
        else
          forge_udp_packet(pfring_zc_pkt_buff_data(buffers[buffer_id + i], zq), numPkts + i);
      }
    }

    /* TODO send unsent packets when a burst is partially sent */
    while (unlikely((sent_packets = pfring_zc_send_pkt_burst(zq, &buffers[buffer_id], BURSTLEN, flush_packet)) <= 0)) {
      if (unlikely(do_shutdown)) break;
      if (!active) usleep(1);
    }

    numPkts += sent_packets;
    numBytes += ((packet_len + 24 /* 8 Preamble + 4 CRC + 12 IFG */ ) * sent_packets);

    buffer_id += BURSTLEN;
    buffer_id &= NBUFFMASK;

    if(pps > 0) {
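      /* rate limiting: busy-wait until the next burst is due, syncing the TX queue once while waiting */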
      u_int8_t synced = 0;
      if (use_pulse_time) {
        while(*pulse_timestamp_ns - ts_ns_start < numPkts * ns_delta && !do_shutdown)
          if (!synced) pfring_zc_sync_queue(zq, tx_only), synced = 1;
      } else {
        while((getticks() - tick_start) < (numPkts * tick_delta))
          if (!synced) pfring_zc_sync_queue(zq, tx_only), synced = 1;
      }
    }

  } 

  } else {
#endif

  /****** Packet API ******/
  while (likely(!do_shutdown && (!num_to_send || numPkts < num_to_send))) {

    buffers[buffer_id]->len = packet_len;

#if 1
    if (!num_queue_buffers || numPkts < num_queue_buffers + NBUFF || num_ips > 1) { /* forge each buffer only on the first pass (always forge when num_ips > 1) */
      if (stdin_packet_len > 0)
        memcpy(pfring_zc_pkt_buff_data(buffers[buffer_id], zq), stdin_packet, stdin_packet_len);
      else
        forge_udp_packet(pfring_zc_pkt_buff_data(buffers[buffer_id], zq), numPkts);
    }
#else
    {
      u_char *pkt_data = pfring_zc_pkt_buff_data(buffers[buffer_id], zq);
      int k;
      u_int8_t j = numPkts;
      for(k = 0; k < buffers[buffer_id]->len; k++)
        pkt_data[k] = j++;
      pkt_data[k-1] = cluster_id;
    }
#endif

    if (append_timestamp)
      buffers[buffer_id]->len = append_packet_ts(pfring_zc_pkt_buff_data(buffers[buffer_id], zq), buffers[buffer_id]->len);

    while (unlikely((sent_bytes = pfring_zc_send_pkt(zq, &buffers[buffer_id], flush_packet)) < 0)) {
      if (unlikely(do_shutdown)) break;
      if (!active) usleep(1);
    }

    numPkts++;
    numBytes += sent_bytes + 24; /* 8 Preamble + 4 CRC + 12 IFG */

    buffer_id++;
    buffer_id &= NBUFFMASK;

    if(pps > 0) {
      u_int8_t synced = 0;
      if (use_pulse_time) {
        while(*pulse_timestamp_ns - ts_ns_start < numPkts * ns_delta && !do_shutdown)
          if (!synced) pfring_zc_sync_queue(zq, tx_only), synced = 1;
      } else {
        while((getticks() - tick_start) < (numPkts * tick_delta))
          if (!synced) pfring_zc_sync_queue(zq, tx_only), synced = 1;
      }
    }
  }

#ifdef BURST_API  
  }
#endif

  if (!flush_packet) 
    pfring_zc_sync_queue(zq, tx_only);

  return NULL;
}
Example #11
static int pfring_zc_daq_acquire(void *handle, int cnt, DAQ_Analysis_Func_t callback,
#if (DAQ_API_VERSION >= 0x00010002)
                                 DAQ_Meta_Func_t metaback,
#endif
                                 void *user) {
    Pfring_Context_t *context = (Pfring_Context_t *) handle;
    int ret = 0, i = 0, rx_ring_idx = context->num_devices - 1, c = 0;
    DAQ_PktHdr_t hdr;
    DAQ_Verdict verdict;
    u_char *pkt_buffer;

#ifdef DAQ_PF_RING_BEST_EFFORT_BOOST
    if (context->mode == DAQ_MODE_PASSIVE && context->ids_bridge == 2)
        return pfring_zc_daq_acquire_best_effort(handle, cnt, callback,
#if (DAQ_API_VERSION >= 0x00010002)
                metaback,
#endif
                user);
#endif

    context->analysis_func = callback;
    context->breakloop = 0;

    while (!context->breakloop && (cnt <= 0 || c < cnt)) {

#ifdef SIG_RELOAD
        if (pfring_zc_daq_reload_requested)
            pfring_zc_daq_reload(context);
#endif

        for (i = 0; i < context->num_devices; i++) {
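            /* poll each device round-robin with a non-blocking receive */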
            rx_ring_idx = (rx_ring_idx + 1) % context->num_devices;

            ret = pfring_zc_recv_pkt(context->rx_queues[rx_ring_idx], &context->buffer, 0 /* Don't wait */);

            if (ret > 0)
                break;
        }

        if (ret <= 0) {
            if (usleep(1) == -1)
                if (errno == EINTR)
                    break;
            continue;
        }

        hdr.pktlen = hdr.caplen = context->buffer->len;
        hdr.ts.tv_sec = context->buffer->ts.tv_sec;
        hdr.ts.tv_usec = context->buffer->ts.tv_nsec/1000;
#if (DAQ_API_VERSION >= 0x00010002)
        hdr.ingress_index = context->ifindexes[rx_ring_idx];
        hdr.egress_index = -1;
        hdr.ingress_group = -1;
        hdr.egress_group = -1;
#else
        hdr.device_index = context->ifindexes[rx_ring_idx];
#endif
        hdr.flags = 0;

        pkt_buffer = pfring_zc_pkt_buff_data(context->buffer, context->rx_queues[rx_ring_idx]);

#ifdef ENABLE_BPF
        if (!context->bpf_filter || bpf_filter(context->filter.bf_insns, pkt_buffer, hdr.caplen, hdr.pktlen) != 0) { /* analyse */
#endif
            verdict = context->analysis_func(user, &hdr, pkt_buffer);
#ifdef ENABLE_BPF
        } else
            verdict = DAQ_VERDICT_PASS;
#endif

        if (verdict >= MAX_DAQ_VERDICT)
            verdict = DAQ_VERDICT_PASS;

        if (context->mode == DAQ_MODE_PASSIVE && context->ids_bridge) { /* always forward the packet */

            pfring_zc_daq_send_packet(context, context->tx_queues[rx_ring_idx ^ 0x1], hdr.caplen);

        } else if (context->mode == DAQ_MODE_INLINE && verdict != DAQ_VERDICT_PASS /* optimisation */ ) {
            /* parsing eth_type to forward ARP */
            struct ethhdr *eh = (struct ethhdr *) pkt_buffer;
            u_int16_t eth_type = ntohs(eh->h_proto);
            u_int16_t vlan_offset = 0;
            if (eth_type == 0x8100 /* 802.1q (VLAN) */) {
                struct eth_vlan_hdr *vh;
                vlan_offset = sizeof(struct ethhdr) - sizeof(struct eth_vlan_hdr);
                while (eth_type == 0x8100 /* 802.1q (VLAN) */ ) {
                    vlan_offset += sizeof(struct eth_vlan_hdr);
                    vh = (struct eth_vlan_hdr *) &pkt_buffer[vlan_offset];
                    eth_type = ntohs(vh->h_proto);
                }
            }

            if (eth_type == 0x0806 /* ARP */ )
                verdict = DAQ_VERDICT_PASS;
        }

        switch(verdict) {
        case DAQ_VERDICT_BLACKLIST: /* Block the packet and block all future packets in the same flow systemwide. */
            /* TODO handle hw filters */
            break;
        case DAQ_VERDICT_WHITELIST: /* Pass the packet and fastpath all future packets in the same flow systemwide. */
        case DAQ_VERDICT_IGNORE:    /* Pass the packet and fastpath all future packets in the same flow for this application. */
        case DAQ_VERDICT_PASS:      /* Pass the packet */
        case DAQ_VERDICT_REPLACE:   /* Pass a packet that has been modified in-place. (No resizing allowed!) */
            if (context->mode == DAQ_MODE_INLINE)
                pfring_zc_daq_send_packet(context, context->tx_queues[rx_ring_idx ^ 0x1], hdr.caplen);
            break;
        case DAQ_VERDICT_BLOCK:     /* Block the packet. */
            /* Nothing to do really */
            break;
        case MAX_DAQ_VERDICT:
            /* No way we can reach this point */
            break;
        }

        context->stats.packets_received++;
        context->stats.verdicts[verdict]++;
        c++;
    }

    return 0;
}