Example #1
int pfring_bundle_add(pfring_bundle *bundle, pfring *ring) {
  /* Refuse additions once the bundle is full (note: this check also
     leaves the last slot of sockets[]/pfd[] unused) */
  if(bundle->num_sockets >= (MAX_NUM_BUNDLE_ELEMENTS-1))
    return(-1);

  bundle->sockets[bundle->num_sockets] = ring;
  /* Cache the ring's selectable fd so the whole bundle can be poll()ed */
  bundle->pfd[bundle->num_sockets].fd = pfring_get_selectable_fd(ring);
  bundle->num_sockets++;

  pfring_enable_ring(ring); /* start capturing on the newly added ring */

  return(0);
}
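
For context, a minimal usage sketch of the bundle API follows. It assumes the PF_RING helpers declared in pfring.h — the three-argument form of pfring_open(), pfring_bundle_init() with the pick_round_robin read policy, pfring_bundle_read() and pfring_bundle_close() — and the exact pfring_bundle_read() signature should be checked against the pfring.h you build with. The device names are placeholders.

#include <stdio.h>
#include <pfring.h>

int main(void) {
  pfring_bundle bundle;
  struct pfring_pkthdr hdr;
  u_char *buffer = NULL;
  pfring *r1, *r2;

  /* Placeholder devices; 1536-byte snaplen, promiscuous mode */
  r1 = pfring_open("eth1", 1536, PF_RING_PROMISC);
  r2 = pfring_open("eth2", 1536, PF_RING_PROMISC);
  if(!r1 || !r2) return 1;

  pfring_bundle_init(&bundle, pick_round_robin);
  pfring_bundle_add(&bundle, r1); /* also enables the ring */
  pfring_bundle_add(&bundle, r2);

  /* Read one packet from whichever ring has data */
  if(pfring_bundle_read(&bundle, &buffer, 0, &hdr, 1 /* wait */) > 0)
    printf("captured %u bytes\n", hdr.caplen);

  pfring_bundle_close(&bundle);
  return 0;
}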
Example #2
static int pfring_daq_set_filter(void *handle, const char *filter) {
  Pfring_Context_t *context = (Pfring_Context_t *) handle;
  int ret, i;
  struct sfbpf_program fcode;

  if(context->ring_handles[DAQ_PF_RING_PASSIVE_DEV_IDX]) {
    if(sfbpf_compile(context->snaplen, DLT_EN10MB, &fcode,
                     filter, 0 /* don't optimize (1 = optimize) */, htonl(context->netmask)) < 0) {
      DPE(context->errbuf, "%s: BPF state machine compilation failed!", __FUNCTION__);
      return DAQ_ERROR;
    }

    ret = DAQ_SUCCESS;
    for (i = 0; i < context->num_devices; i++) {
      /* Attach the compiled BPF program to each ring's socket */
      if(setsockopt(pfring_get_selectable_fd(context->ring_handles[i]), 0,
                    SO_ATTACH_FILTER, &fcode, sizeof(fcode)) != 0) {
        ret = DAQ_ERROR;
      }
    }

    sfbpf_freecode(&fcode);
  } else {
    /* The ring isn't open yet: just check that the filter is valid */
    if(sfbpf_compile(context->snaplen, DLT_EN10MB, &fcode,
                     filter, 0 /* don't optimize (1 = optimize) */, 0 /* netmask */) < 0) {
      DPE(context->errbuf, "%s: BPF state machine compilation failed!", __FUNCTION__);
      return DAQ_ERROR;
    }

    ret = DAQ_SUCCESS;

    /* Remember the filter so it can be applied once the ring is open */
    if(context->filter_string)
      free(context->filter_string);

    context->filter_string = strdup(filter);

    if(!context->filter_string) {
      DPE(context->errbuf, "%s: Couldn't allocate memory for the filter string!",
	  __FUNCTION__);
      ret = DAQ_ERROR;
    }

    sfbpf_freecode(&fcode);
  }

  return ret;
}
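
The validate-only branch above can be exercised on its own. Here is a minimal sketch, assuming libsfbpf's sfbpf_compile()/sfbpf_freecode() exactly as used above; the sfbpf.h and sfbpf_dlt.h header names are an assumption about how the DAQ sources ship them, and the helper name and snaplen value are hypothetical.

#include <stdio.h>
#include <sfbpf.h>
#include <sfbpf_dlt.h>

/* Hypothetical helper: returns 0 if `filter` compiles as a BPF expression */
static int validate_bpf(const char *filter, int snaplen) {
  struct sfbpf_program fcode;

  if(sfbpf_compile(snaplen, DLT_EN10MB, &fcode, filter,
                   0 /* don't optimize */, 0 /* netmask */) < 0)
    return -1;

  sfbpf_freecode(&fcode); /* release the compiled program */
  return 0;
}

int main(void) {
  printf("valid: %s\n", validate_bpf("tcp port 80", 1518) == 0 ? "yes" : "no");
  return 0;
}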
Example #3
static int pfring_daq_acquire(void *handle, int cnt, DAQ_Analysis_Func_t callback, 
#if (DAQ_API_VERSION >= 0x00010002)
                              DAQ_Meta_Func_t metaback,
#endif
			      void *user) {
  Pfring_Context_t *context = (Pfring_Context_t *) handle;
  int ret = 0, i, current_ring_idx = context->num_devices - 1, rx_ring_idx;
  struct pollfd pfd[DAQ_PF_RING_MAX_NUM_DEVICES];
  hash_filtering_rule hash_rule;
  memset(&hash_rule, 0, sizeof(hash_rule));

  context->analysis_func = callback;
  context->breakloop = 0;

  for (i = 0; i < context->num_devices; i++)
    pfring_enable_ring(context->ring_handles[i]);

  while((!context->breakloop) && ((cnt == -1) || (cnt > 0))) {
    struct pfring_pkthdr phdr;
    DAQ_PktHdr_t hdr;
    DAQ_Verdict verdict;

    memset(&phdr, 0, sizeof(phdr));

    if(pfring_daq_reload_requested)
      pfring_daq_reload(context);

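    /* Try a non-blocking read on each ring, round-robin, so one busy ring
       cannot starve the others */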
    for (i = 0; i < context->num_devices; i++) {
      current_ring_idx = (current_ring_idx + 1) % context->num_devices;

      ret = pfring_recv(context->ring_handles[current_ring_idx], &context->pkt_buffer, 0, &phdr, 0 /* don't wait */);

      if (ret > 0) break;
    }

    if(ret <= 0) {
      /* No packet to read: let's poll */
      int rc;

      for (i = 0; i < context->num_devices; i++) {
        pfd[i].fd = pfring_get_selectable_fd(context->ring_handles[i]);
        pfd[i].events = POLLIN;
	pfd[i].revents = 0;
      }

      rc = poll(pfd, context->num_devices, context->timeout);

      if(rc < 0) {
	if(errno == EINTR)
	  break;

	DPE(context->errbuf, "%s: Poll failed: %s(%d)", __FUNCTION__, strerror(errno), errno);
	return DAQ_ERROR;
      }
    } else {
      hdr.caplen = phdr.caplen;
      hdr.pktlen = phdr.len;
      hdr.ts = phdr.ts;
#if (DAQ_API_VERSION >= 0x00010002)
      hdr.ingress_index = phdr.extended_hdr.if_index;
      hdr.egress_index = -1;
      hdr.ingress_group = -1;
      hdr.egress_group = -1;
#else
      hdr.device_index = phdr.extended_hdr.if_index;
#endif
      hdr.flags = 0;

      rx_ring_idx = current_ring_idx;

      context->stats.packets_received++;

      verdict = context->analysis_func(user, &hdr, (u_char *) context->pkt_buffer);

      if(verdict >= MAX_DAQ_VERDICT)
	verdict = DAQ_VERDICT_PASS;

      /* ARP frames are always passed, regardless of the verdict */
      if (phdr.extended_hdr.parsed_pkt.eth_type == 0x0806 /* ARP */ )
        verdict = DAQ_VERDICT_PASS;
      
      switch(verdict) {
      case DAQ_VERDICT_BLACKLIST: /* Block the packet and block all future packets in the same flow systemwide. */
	if (context->use_kernel_filters) {
          
	  /* Parse up to layer 4 so the 5-tuple fields below are populated
	   * (or use pfring_recv_parsed() to force parsing at receive time) */
	  pfring_parse_pkt(context->pkt_buffer, &phdr, 4 /* level */, 0, 0);

	  hash_rule.rule_id     = context->filter_count++;
	  hash_rule.vlan_id     = phdr.extended_hdr.parsed_pkt.vlan_id;
	  hash_rule.proto       = phdr.extended_hdr.parsed_pkt.l3_proto;
	  memcpy(&hash_rule.host_peer_a, &phdr.extended_hdr.parsed_pkt.ipv4_src, sizeof(ip_addr));
	  memcpy(&hash_rule.host_peer_b, &phdr.extended_hdr.parsed_pkt.ipv4_dst, sizeof(ip_addr));
	  hash_rule.port_peer_a = phdr.extended_hdr.parsed_pkt.l4_src_port;
	  hash_rule.port_peer_b = phdr.extended_hdr.parsed_pkt.l4_dst_port;
	  hash_rule.plugin_action.plugin_id = NO_PLUGIN_ID;

	  if (context->mode == DAQ_MODE_PASSIVE && context->num_reflector_devices > rx_ring_idx) { /* lowlevelbridge ON */
	    hash_rule.rule_action = reflect_packet_and_stop_rule_evaluation;
	    snprintf(hash_rule.reflector_device_name, REFLECTOR_NAME_LEN, "%s", context->reflector_devices[rx_ring_idx]);
	  } else {
	    hash_rule.rule_action = dont_forward_packet_and_stop_rule_evaluation;
	  }

	  pfring_handle_hash_filtering_rule(context->ring_handles[rx_ring_idx], &hash_rule, 1 /* add_rule */);

	  /* Purge rules that have been idle (i.e. no matching packets) for more
	     than the configured timeout (1h by default) */
	  pfring_purge_idle_hash_rules(context->ring_handles[rx_ring_idx], context->idle_rules_timeout);

#if DEBUG
	  printf("[DEBUG] %d.%d.%d.%d:%d -> %d.%d.%d.%d:%d Verdict=%d Action=%d\n",
	         hash_rule.host_peer_a.v4 >> 24 & 0xFF, hash_rule.host_peer_a.v4 >> 16 & 0xFF,
	         hash_rule.host_peer_a.v4 >>  8 & 0xFF, hash_rule.host_peer_a.v4 >>  0 & 0xFF,
	         hash_rule.port_peer_a & 0xFFFF,
	         hash_rule.host_peer_b.v4 >> 24 & 0xFF, hash_rule.host_peer_b.v4 >> 16 & 0xFF,
	         hash_rule.host_peer_b.v4 >>  8 & 0xFF, hash_rule.host_peer_b.v4 >>  0 & 0xFF,
	         hash_rule.port_peer_b & 0xFFFF,
	         verdict,
		 hash_rule.rule_action);
#endif
	}
	break;

      case DAQ_VERDICT_WHITELIST: /* Pass the packet and fastpath all future packets in the same flow systemwide. */
      case DAQ_VERDICT_IGNORE:    /* Pass the packet and fastpath all future packets in the same flow for this application. */
        /* Setting a rule to reflect packets when lowlevelbridge is ON could be an
	 * optimization here, but we can't set "forward" (the reflector wouldn't work)
	 * or "reflect" (packets would be reflected twice) hash rules */
      case DAQ_VERDICT_PASS:      /* Pass the packet */
      case DAQ_VERDICT_REPLACE:   /* Pass a packet that has been modified in place. (No resizing allowed!) */
        if (context->mode == DAQ_MODE_INLINE) {
	  /* Inline devices are configured in pairs: rx_ring_idx ^ 0x1 selects
	     the peer interface the packet is forwarded to */
	  pfring_daq_send_packet(context, context->ring_handles[rx_ring_idx ^ 0x1], hdr.caplen,
				 context->ring_handles[rx_ring_idx], context->ifindexes[rx_ring_idx ^ 0x1]);
	}
	break;

      case DAQ_VERDICT_BLOCK:   /* Block the packet. */
	/* Nothing to do really */
	break;

      case MAX_DAQ_VERDICT:
	/* No way we can reach this point */
	break;
      }

      context->stats.verdicts[verdict]++;
      if(cnt > 0) cnt--;
    }
  }

  return 0;
}
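
To isolate the DAQ_VERDICT_BLACKLIST path, a minimal sketch follows. It uses only structures and calls that already appear above (hash_filtering_rule, pfring_handle_hash_filtering_rule, the parsed_pkt fields); the helper name and rule-id handling are hypothetical, and the packet header is assumed to have been parsed up to layer 4 already.

#include <string.h>
#include <pfring.h>

/* Hypothetical helper: drop one 5-tuple flow in the kernel, mirroring the
   DAQ_VERDICT_BLACKLIST branch above. `phdr` must already be parsed to L4. */
static int blacklist_flow(pfring *ring, struct pfring_pkthdr *phdr,
                          u_int16_t rule_id) {
  hash_filtering_rule rule;

  memset(&rule, 0, sizeof(rule));
  rule.rule_id     = rule_id;
  rule.vlan_id     = phdr->extended_hdr.parsed_pkt.vlan_id;
  rule.proto       = phdr->extended_hdr.parsed_pkt.l3_proto;
  memcpy(&rule.host_peer_a, &phdr->extended_hdr.parsed_pkt.ipv4_src, sizeof(ip_addr));
  memcpy(&rule.host_peer_b, &phdr->extended_hdr.parsed_pkt.ipv4_dst, sizeof(ip_addr));
  rule.port_peer_a = phdr->extended_hdr.parsed_pkt.l4_src_port;
  rule.port_peer_b = phdr->extended_hdr.parsed_pkt.l4_dst_port;
  rule.rule_action = dont_forward_packet_and_stop_rule_evaluation;
  rule.plugin_action.plugin_id = NO_PLUGIN_ID;

  return pfring_handle_hash_filtering_rule(ring, &rule, 1 /* add_rule */);
}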