Example #1
0
void telemetry_daemon(void *t_data_void)
{
  struct telemetry_data *t_data = t_data_void;
  telemetry_peer_udp_cache tpuc;

  int slen, clen, ret, rc, peers_idx, allowed, yes=1, no=0;
  int peers_idx_rr = 0, max_peers_idx = 0, peers_num = 0;
  int decoder = 0, data_decoder = 0, recv_flags = 0;
  u_int16_t port = 0;
  char *srv_proto = NULL;
  time_t now, last_udp_timeout_check;

  telemetry_peer *peer = NULL;
  telemetry_peer_z *peer_z = NULL;

#if defined ENABLE_IPV6
  struct sockaddr_storage server, client;
#else
  struct sockaddr server, client;
#endif
  struct hosts_table allow;
  struct host_addr addr;

  /* select() stuff */
  fd_set read_descs, bkp_read_descs;
  int fd, select_fd, bkp_select_fd, recalc_fds, select_num;

  /* logdump time management */
  time_t dump_refresh_deadline;
  struct timeval dump_refresh_timeout, *drt_ptr;

  if (!t_data) {
    /* t_data is NULL here: do not dereference it for the log string */
    Log(LOG_ERR, "ERROR ( %s/core ): telemetry_daemon(): missing telemetry data. Terminating.\n", config.name);
    exit_all(1);
  }

  /* initial cleanups */
  reload_log_telemetry_thread = FALSE;
  memset(&server, 0, sizeof(server));
  memset(&client, 0, sizeof(client));
  memset(&allow, 0, sizeof(struct hosts_table));
  clen = sizeof(client);
  telemetry_peers_udp_cache = NULL;
  last_udp_timeout_check = FALSE;

  telemetry_misc_db = &inter_domain_misc_dbs[FUNC_TYPE_TELEMETRY];
  memset(telemetry_misc_db, 0, sizeof(telemetry_misc_structs));

  /* initialize variables */
  if (config.telemetry_port_tcp && config.telemetry_port_udp) {
    Log(LOG_ERR, "ERROR ( %s/%s ): telemetry_daemon_port_tcp and telemetry_daemon_port_udp are mutually exclusive. Terminating.\n", config.name, t_data->log_str);
    exit_all(1);
  }
  else if (!config.telemetry_port_tcp && !config.telemetry_port_udp) {
    /* defaulting to TCP */
    port = config.telemetry_port_tcp = TELEMETRY_TCP_PORT;
    srv_proto = malloc(strlen("tcp") + 1);
    strcpy(srv_proto, "tcp");
  }
  else {
    if (config.telemetry_port_tcp) {
      port = config.telemetry_port_tcp; 
      srv_proto = malloc(strlen("tcp") + 1);
      strcpy(srv_proto, "tcp");
    }

    if (config.telemetry_port_udp) {
      port = config.telemetry_port_udp;
      srv_proto = malloc(strlen("udp") + 1);
      strcpy(srv_proto, "udp");
    }
  }

  /* socket creation for telemetry server: IPv6 if available, with IPv4 fallback */
#if (defined ENABLE_IPV6)
  if (!config.telemetry_ip) {
    struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)&server;

    sa6->sin6_family = AF_INET6;
    sa6->sin6_port = htons(port);
    slen = sizeof(struct sockaddr_in6);
  }
#else
  if (!config.telemetry_ip) {
    struct sockaddr_in *sa4 = (struct sockaddr_in *)&server;

    sa4->sin_family = AF_INET;
    sa4->sin_addr.s_addr = htonl(0);
    sa4->sin_port = htons(port);
    slen = sizeof(struct sockaddr_in);
  }
#endif
  else {
    trim_spaces(config.telemetry_ip);
    ret = str_to_addr(config.telemetry_ip, &addr);
    if (!ret) {
      Log(LOG_ERR, "ERROR ( %s/%s ): telemetry_daemon_ip value is not a valid IPv4/IPv6 address. Terminating.\n", config.name, t_data->log_str);
      exit_all(1);
    }
    slen = addr_to_sa((struct sockaddr *)&server, &addr, port);
  }

  if (!config.telemetry_decoder) {
    Log(LOG_ERR, "ERROR ( %s/%s ): telemetry_daemon_decoder is not specified. Terminating.\n", config.name, t_data->log_str);
    exit_all(1);
  }
  else {
    if (!strcmp(config.telemetry_decoder, "json")) decoder = TELEMETRY_DECODER_JSON;
    else if (!strcmp(config.telemetry_decoder, "zjson")) {
#if defined (HAVE_ZLIB)
      decoder = TELEMETRY_DECODER_ZJSON;
#else
      Log(LOG_ERR, "ERROR ( %s/%s ): telemetry_daemon_decoder set to 'zjson' but zlib not available. Terminating.\n", config.name, t_data->log_str);
      exit_all(1);
#endif
    }
    else if (!strcmp(config.telemetry_decoder, "cisco_json")) decoder = TELEMETRY_DECODER_CISCO_JSON;
    else if (!strcmp(config.telemetry_decoder, "cisco_zjson")) {
#if defined (HAVE_ZLIB)
      decoder = TELEMETRY_DECODER_CISCO_ZJSON;
#else
      Log(LOG_ERR, "ERROR ( %s/%s ): telemetry_daemon_decoder set to 'cisco_zjson' but zlib not available. Terminating.\n", config.name, t_data->log_str);
      exit_all(1);
#endif
    }
    else if (!strcmp(config.telemetry_decoder, "cisco")) decoder = TELEMETRY_DECODER_CISCO;
    else if (!strcmp(config.telemetry_decoder, "cisco_gpb")) decoder = TELEMETRY_DECODER_CISCO_GPB;
    else if (!strcmp(config.telemetry_decoder, "cisco_gpb_kv")) decoder = TELEMETRY_DECODER_CISCO_GPB_KV;
    else {
      Log(LOG_ERR, "ERROR ( %s/%s ): telemetry_daemon_decoder set to unknown value. Terminating.\n", config.name, t_data->log_str);
      exit_all(1);
    }
  }

  if (!config.telemetry_max_peers) config.telemetry_max_peers = TELEMETRY_MAX_PEERS_DEFAULT;
  Log(LOG_INFO, "INFO ( %s/%s ): maximum telemetry peers allowed: %d\n", config.name, t_data->log_str, config.telemetry_max_peers);

  if (config.telemetry_port_udp) {
    if (!config.telemetry_udp_timeout) config.telemetry_udp_timeout = TELEMETRY_UDP_TIMEOUT_DEFAULT;
    Log(LOG_INFO, "INFO ( %s/%s ): telemetry UDP peers timeout: %u\n", config.name, t_data->log_str, config.telemetry_udp_timeout);
  }

  telemetry_peers = malloc(config.telemetry_max_peers*sizeof(telemetry_peer));
  if (!telemetry_peers) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() telemetry_peers structure. Terminating.\n", config.name, t_data->log_str);
    exit_all(1);
  }
  memset(telemetry_peers, 0, config.telemetry_max_peers*sizeof(telemetry_peer));

  if (telemetry_is_zjson(decoder)) {
    telemetry_peers_z = malloc(config.telemetry_max_peers*sizeof(telemetry_peer_z));
    if (!telemetry_peers_z) {
      Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() telemetry_peers_z structure. Terminating.\n", config.name, t_data->log_str);
      exit_all(1);
    }
    memset(telemetry_peers_z, 0, config.telemetry_max_peers*sizeof(telemetry_peer_z));
  }
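  /* telemetry_peers_z is allocated only when a zjson decoder is configured and
     is indexed in lockstep with telemetry_peers: the same peers_idx addresses a
     peer and its companion zjson state throughout the loop below. */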

  if (config.telemetry_port_udp) {
    telemetry_peers_udp_timeout = malloc(config.telemetry_max_peers*sizeof(telemetry_peer_udp_timeout));
    if (!telemetry_peers_udp_timeout) {
      Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() telemetry_peers_udp_timeout structure. Terminating.\n", config.name, t_data->log_str);
      exit_all(1);
    }
    memset(telemetry_peers_udp_timeout, 0, config.telemetry_max_peers*sizeof(telemetry_peer_udp_timeout));
  }

  if (config.telemetry_msglog_file || config.telemetry_msglog_amqp_routing_key || config.telemetry_msglog_kafka_topic) {
    if (config.telemetry_msglog_file) telemetry_misc_db->msglog_backend_methods++;
    if (config.telemetry_msglog_amqp_routing_key) telemetry_misc_db->msglog_backend_methods++;
    if (config.telemetry_msglog_kafka_topic) telemetry_misc_db->msglog_backend_methods++;

    if (telemetry_misc_db->msglog_backend_methods > 1) {
      Log(LOG_ERR, "ERROR ( %s/%s ): telemetry_daemon_msglog_file, telemetry_daemon_msglog_amqp_routing_key and telemetry_daemon_msglog_kafka_topic are mutually exclusive. Terminating.\n", config.name, t_data->log_str);
      exit_all(1);
    }
  }

  if (config.telemetry_dump_file || config.telemetry_dump_amqp_routing_key || config.telemetry_dump_kafka_topic) {
    if (config.telemetry_dump_file) telemetry_misc_db->dump_backend_methods++;
    if (config.telemetry_dump_amqp_routing_key) telemetry_misc_db->dump_backend_methods++;
    if (config.telemetry_dump_kafka_topic) telemetry_misc_db->dump_backend_methods++;

    if (telemetry_misc_db->dump_backend_methods > 1) {
      Log(LOG_ERR, "ERROR ( %s/%s ): telemetry_dump_file, telemetry_dump_amqp_routing_key and telemetry_dump_kafka_topic are mutually exclusive. Terminating.\n", config.name, t_data->log_str);
      exit_all(1);
    }
  }

  if (telemetry_misc_db->msglog_backend_methods) {
    telemetry_misc_db->peers_log = malloc(config.telemetry_max_peers*sizeof(telemetry_peer_log));
    if (!telemetry_misc_db->peers_log) {
      Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() telemetry peers_log structure. Terminating.\n", config.name, t_data->log_str);
      exit_all(1);
    }
    memset(telemetry_misc_db->peers_log, 0, config.telemetry_max_peers*sizeof(telemetry_peer_log));
    telemetry_peer_log_seq_init(&telemetry_misc_db->log_seq);

    if (config.telemetry_msglog_amqp_routing_key) {
#ifdef WITH_RABBITMQ
      telemetry_daemon_msglog_init_amqp_host();
      p_amqp_connect_to_publish(&telemetry_daemon_msglog_amqp_host);

      if (!config.telemetry_msglog_amqp_retry)
        config.telemetry_msglog_amqp_retry = AMQP_DEFAULT_RETRY;
#else
      Log(LOG_WARNING, "WARN ( %s/%s ): p_amqp_connect_to_publish() not possible due to missing --enable-rabbitmq\n", config.name, t_data->log_str);
#endif
    }

    if (config.telemetry_msglog_kafka_topic) {
#ifdef WITH_KAFKA
      telemetry_daemon_msglog_init_kafka_host();
#else
      Log(LOG_WARNING, "WARN ( %s/%s ): p_kafka_connect_to_produce() not possible due to missing --enable-kafka\n", config.name, t_data->log_str);
#endif
    }
  }

  if (config.telemetry_port_tcp) config.telemetry_sock = socket(((struct sockaddr *)&server)->sa_family, SOCK_STREAM, 0);
  else if (config.telemetry_port_udp) config.telemetry_sock = socket(((struct sockaddr *)&server)->sa_family, SOCK_DGRAM, 0);

  if (config.telemetry_sock < 0) {
#if (defined ENABLE_IPV6)
    /* retry with IPv4 */
    if (!config.telemetry_ip) {
      struct sockaddr_in *sa4 = (struct sockaddr_in *)&server;

      sa4->sin_family = AF_INET;
      sa4->sin_addr.s_addr = htonl(0);
      sa4->sin_port = htons(port);
      slen = sizeof(struct sockaddr_in);

      if (config.telemetry_port_tcp) config.telemetry_sock = socket(((struct sockaddr *)&server)->sa_family, SOCK_STREAM, 0);
      else if (config.telemetry_port_udp) config.telemetry_sock = socket(((struct sockaddr *)&server)->sa_family, SOCK_DGRAM, 0);
    }
#endif

    if (config.telemetry_sock < 0) {
      Log(LOG_ERR, "ERROR ( %s/%s ): socket() failed. Terminating.\n", config.name, t_data->log_str);
      exit_all(1);
    }
  }

  if (config.telemetry_ipprec) {
    int opt = config.telemetry_ipprec << 5;

    rc = setsockopt(config.telemetry_sock, IPPROTO_IP, IP_TOS, &opt, sizeof(opt));
    if (rc < 0) Log(LOG_WARNING, "WARN ( %s/%s ): setsockopt() failed for IP_TOS (errno: %d).\n", config.name, t_data->log_str, errno);
  }

  rc = setsockopt(config.telemetry_sock, SOL_SOCKET, SO_REUSEADDR, (char *)&yes, sizeof(yes));
  if (rc < 0) Log(LOG_WARNING, "WARN ( %s/%s ): setsockopt() failed for SO_REUSEADDR (errno: %d).\n", config.name, t_data->log_str, errno);

#if (defined ENABLE_IPV6) && (defined IPV6_BINDV6ONLY)
  rc = setsockopt(config.telemetry_sock, IPPROTO_IPV6, IPV6_BINDV6ONLY, (char *) &no, (socklen_t) sizeof(no));
  if (rc < 0) Log(LOG_WARNING, "WARN ( %s/%s ): setsockopt() failed for IPV6_BINDV6ONLY (errno: %d).\n", config.name, t_data->log_str, errno);
#endif

  if (config.telemetry_pipe_size) {
    int l = sizeof(config.telemetry_pipe_size);
    int saved = 0, obtained = 0;

    getsockopt(config.telemetry_sock, SOL_SOCKET, SO_RCVBUF, &saved, &l);
    Setsocksize(config.telemetry_sock, SOL_SOCKET, SO_RCVBUF, &config.telemetry_pipe_size, sizeof(config.telemetry_pipe_size));
    getsockopt(config.telemetry_sock, SOL_SOCKET, SO_RCVBUF, &obtained, &l);

    /* restore the original buffer only if the kernel gave us less than we had */
    if (obtained < saved) {
      Setsocksize(config.telemetry_sock, SOL_SOCKET, SO_RCVBUF, &saved, l);
      getsockopt(config.telemetry_sock, SOL_SOCKET, SO_RCVBUF, &obtained, &l);
    }
    Log(LOG_INFO, "INFO ( %s/%s ): telemetry_daemon_pipe_size: obtained=%d target=%d.\n",
	config.name, t_data->log_str, obtained, config.telemetry_pipe_size);
  }

  rc = bind(config.telemetry_sock, (struct sockaddr *) &server, slen);
  if (rc < 0) {
    char null_ip_address[] = "0.0.0.0";
    char *ip_address;

    ip_address = config.telemetry_ip ? config.telemetry_ip : null_ip_address;
    Log(LOG_ERR, "ERROR ( %s/%s ): bind() to ip=%s port=%u/%s failed (errno: %d).\n",
	config.name, t_data->log_str, ip_address, port, srv_proto, errno);
    exit_all(1);
  }

  if (config.telemetry_port_tcp) {
    rc = listen(config.telemetry_sock, 1);
    if (rc < 0) {
      Log(LOG_ERR, "ERROR ( %s/%s ): listen() failed (errno: %d).\n", config.name, t_data->log_str, errno);
      exit_all(1);
    }
  }

  /* Preparing for synchronous I/O multiplexing */
  select_fd = 0;
  FD_ZERO(&bkp_read_descs);
  FD_SET(config.telemetry_sock, &bkp_read_descs);

  {
    char srv_string[INET6_ADDRSTRLEN];
    struct host_addr srv_addr;
    u_int16_t srv_port;

    sa_to_addr((struct sockaddr *)&server, &srv_addr, &srv_port);
    addr_to_str(srv_string, &srv_addr);
    Log(LOG_INFO, "INFO ( %s/%s ): waiting for telemetry data on %s:%u/%s\n", config.name, t_data->log_str, srv_string, srv_port, srv_proto);
  }

  /* Preparing ACL, if any */
  if (config.telemetry_allow_file) load_allow_file(config.telemetry_allow_file, &allow);

  if (telemetry_misc_db->msglog_backend_methods) {
#ifdef WITH_JANSSON
    if (!config.telemetry_msglog_output) config.telemetry_msglog_output = PRINT_OUTPUT_JSON;
#else
    Log(LOG_WARNING, "WARN ( %s/%s ): telemetry_daemon_msglog_output set to json but will produce no output (missing --enable-jansson).\n", config.name, t_data->log_str);
#endif
  }

  if (telemetry_misc_db->dump_backend_methods) {
#ifdef WITH_JANSSON
    if (!config.telemetry_dump_output) config.telemetry_dump_output = PRINT_OUTPUT_JSON;
#else
    Log(LOG_WARNING, "WARN ( %s/%s ): telemetry_table_dump_output set to json but will produce no output (missing --enable-jansson).\n", config.name, t_data->log_str);
#endif
  }

  if (telemetry_misc_db->dump_backend_methods) {
    char dump_roundoff[] = "m";
    time_t tmp_time;

    if (config.telemetry_dump_refresh_time) {
      gettimeofday(&telemetry_misc_db->log_tstamp, NULL);
      dump_refresh_deadline = telemetry_misc_db->log_tstamp.tv_sec;
      tmp_time = roundoff_time(dump_refresh_deadline, dump_roundoff);
      while ((tmp_time+config.telemetry_dump_refresh_time) < dump_refresh_deadline) {
        tmp_time += config.telemetry_dump_refresh_time;
      }
      dump_refresh_deadline = tmp_time;
      dump_refresh_deadline += config.telemetry_dump_refresh_time; /* it's a deadline not a basetime */
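      /* Illustrative example: with telemetry_dump_refresh_time = 60 and a start
         time of 10:07:42, roundoff_time() with "m" rounds down to 10:07:00, the
         loop above adds nothing, and the first dump deadline lands on 10:08:00,
         ie. one full refresh interval past the minute boundary. */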
    }
    else {
      config.telemetry_dump_file = NULL;
      telemetry_misc_db->dump_backend_methods = FALSE;
      Log(LOG_WARNING, "WARN ( %s/%s ): Invalid 'telemetry_dump_refresh_time'.\n", config.name, t_data->log_str);
    }

    if (config.telemetry_dump_amqp_routing_key) telemetry_dump_init_amqp_host();
    if (config.telemetry_dump_kafka_topic) telemetry_dump_init_kafka_host();
  }

  select_fd = bkp_select_fd = (config.telemetry_sock + 1);
  recalc_fds = FALSE;

  telemetry_link_misc_structs(telemetry_misc_db);

  for (;;) {
    select_again:

    if (recalc_fds) {
      select_fd = config.telemetry_sock;
      max_peers_idx = -1; /* .. since valid indexes include 0 */

      for (peers_idx = 0, peers_num = 0; peers_idx < config.telemetry_max_peers; peers_idx++) {
        if (select_fd < telemetry_peers[peers_idx].fd) select_fd = telemetry_peers[peers_idx].fd;
        if (telemetry_peers[peers_idx].fd) {
	  max_peers_idx = peers_idx;
	  peers_num++;
	}
      }
      select_fd++;
      max_peers_idx++;

      bkp_select_fd = select_fd;
      recalc_fds = FALSE;
    }
    else select_fd = bkp_select_fd;

    memcpy(&read_descs, &bkp_read_descs, sizeof(bkp_read_descs));
    if (telemetry_misc_db->dump_backend_methods) {
      int delta;

      calc_refresh_timeout_sec(dump_refresh_deadline, telemetry_misc_db->log_tstamp.tv_sec, &delta);
      dump_refresh_timeout.tv_sec = delta;
      dump_refresh_timeout.tv_usec = 0;
      drt_ptr = &dump_refresh_timeout;
    }
    else drt_ptr = NULL;

    select_num = select(select_fd, &read_descs, NULL, NULL, drt_ptr);
    if (select_num < 0) goto select_again;

    // XXX: UDP case: timeout handling (to be tested)
    if (config.telemetry_port_udp) {
      now = time(NULL);

      if (now > (last_udp_timeout_check + TELEMETRY_UDP_TIMEOUT_INTERVAL)) {
	for (peers_idx = 0; peers_idx < config.telemetry_max_peers; peers_idx++) {
	  telemetry_peer_udp_timeout *peer_udp_timeout;

	  peer = &telemetry_peers[peers_idx];
	  if (telemetry_is_zjson(decoder)) peer_z = &telemetry_peers_z[peers_idx];
	  peer_udp_timeout = &telemetry_peers_udp_timeout[peers_idx];

	  if (peer->fd) {
	    if (now > (peer_udp_timeout->last_msg + config.telemetry_udp_timeout)) {
	      Log(LOG_INFO, "INFO ( %s/%s ): [%s] telemetry UDP peer removed (timeout).\n", config.name, t_data->log_str, peer->addr_str);
	      telemetry_peer_close(peer, FUNC_TYPE_TELEMETRY);
	      if (telemetry_is_zjson(decoder)) telemetry_peer_z_close(peer_z);
	      recalc_fds = TRUE;
	    }
	  }
	}

	last_udp_timeout_check = now;
      }
    }

    if (reload_log_telemetry_thread) {
      for (peers_idx = 0; peers_idx < config.telemetry_max_peers; peers_idx++) {
        if (telemetry_misc_db->peers_log[peers_idx].fd) {
          fclose(telemetry_misc_db->peers_log[peers_idx].fd);
          telemetry_misc_db->peers_log[peers_idx].fd = open_output_file(telemetry_misc_db->peers_log[peers_idx].filename, "a", FALSE);
          setlinebuf(telemetry_misc_db->peers_log[peers_idx].fd);
        }
        else break;
      }

      reload_log_telemetry_thread = FALSE;
    }

    if (telemetry_misc_db->msglog_backend_methods || telemetry_misc_db->dump_backend_methods) {
      gettimeofday(&telemetry_misc_db->log_tstamp, NULL);
      compose_timestamp(telemetry_misc_db->log_tstamp_str, SRVBUFLEN, &telemetry_misc_db->log_tstamp, TRUE, config.timestamps_since_epoch);

      if (telemetry_misc_db->dump_backend_methods) {
        while (telemetry_misc_db->log_tstamp.tv_sec > dump_refresh_deadline) {
          telemetry_handle_dump_event(t_data);
          dump_refresh_deadline += config.telemetry_dump_refresh_time;
        }
      }

#ifdef WITH_RABBITMQ
      if (config.telemetry_msglog_amqp_routing_key) {
        time_t last_fail = P_broker_timers_get_last_fail(&telemetry_daemon_msglog_amqp_host.btimers);

        if (last_fail && ((last_fail + P_broker_timers_get_retry_interval(&telemetry_daemon_msglog_amqp_host.btimers)) <= telemetry_misc_db->log_tstamp.tv_sec)) {
          telemetry_daemon_msglog_init_amqp_host();
          p_amqp_connect_to_publish(&telemetry_daemon_msglog_amqp_host);
        }
      }
#endif

#ifdef WITH_KAFKA
      if (config.telemetry_msglog_kafka_topic) {
        time_t last_fail = P_broker_timers_get_last_fail(&telemetry_daemon_msglog_kafka_host.btimers);

        if (last_fail && ((last_fail + P_broker_timers_get_retry_interval(&telemetry_daemon_msglog_kafka_host.btimers)) <= telemetry_misc_db->log_tstamp.tv_sec))
          telemetry_daemon_msglog_init_kafka_host();
      }
#endif
    }

    /* 
       If select_num == 0 then we got out of select() due to a timeout rather
       than because we had a message from a peer to handle. By now we did all
       routine checks and can happily return to select() again.
    */
    if (!select_num) goto select_again;

    /* New connection is coming in */
    if (FD_ISSET(config.telemetry_sock, &read_descs)) {
      if (config.telemetry_port_tcp) {
        fd = accept(config.telemetry_sock, (struct sockaddr *) &client, &clen);
        if (fd == ERR) goto read_data;
      }
      else if (config.telemetry_port_udp) {
	/* 1-byte scratch buffer: we only MSG_PEEK to learn the sender's address */
	char dummy_local_buf[1];

	ret = recvfrom(config.telemetry_sock, dummy_local_buf, sizeof(dummy_local_buf), MSG_PEEK, (struct sockaddr *) &client, &clen);
	if (ret <= 0) goto select_again;
	else fd = config.telemetry_sock;
      }

#if defined ENABLE_IPV6
      ipv4_mapped_to_ipv4(&client);
#endif

      /* If an ACL is defined, here we check against and enforce it */
      if (allow.num) allowed = check_allow(&allow, (struct sockaddr *)&client);
      else allowed = TRUE;

      if (!allowed) {
        if (config.telemetry_port_tcp) close(fd);
        goto read_data;
      }

      /* XXX: UDP case may be optimized further */
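      /* UDP "sessions" are tracked by source address: each datagram's sender is
         looked up in a tree cache (pm_tfind/pm_tsearch below) that maps the
         address to the telemetry_peers[] slot allocated on its first datagram,
         and the per-peer UDP timeout is refreshed on every hit. */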
      if (config.telemetry_port_udp) {
	telemetry_peer_udp_cache *tpuc_ret;
	u_int16_t client_port;

        sa_to_addr((struct sockaddr *)&client, &tpuc.addr, &client_port);
	tpuc_ret = pm_tfind(&tpuc, &telemetry_peers_udp_cache, telemetry_tpuc_addr_cmp);

	if (tpuc_ret) {
	  peer = &telemetry_peers[tpuc_ret->index];
	  telemetry_peers_udp_timeout[tpuc_ret->index].last_msg = now;

	  goto read_data;
	}
      }

      for (peer = NULL, peers_idx = 0; peers_idx < config.telemetry_max_peers; peers_idx++) {
        if (!telemetry_peers[peers_idx].fd) {
	  peer = &telemetry_peers[peers_idx];

	  if (telemetry_peer_init(peer, FUNC_TYPE_TELEMETRY)) peer = NULL;

	  if (telemetry_is_zjson(decoder)) {
	    peer_z = &telemetry_peers_z[peers_idx];
	    if (telemetry_peer_z_init(peer_z)) {
	      peer = NULL;
	      peer_z = NULL;
	    }
	  }

	  if (peer) {
	    recalc_fds = TRUE;
	
	    if (config.telemetry_port_udp) {
	      tpuc.index = peers_idx;
	      telemetry_peers_udp_timeout[peers_idx].last_msg = now;

	      if (!pm_tsearch(&tpuc, &telemetry_peers_udp_cache, telemetry_tpuc_addr_cmp, sizeof(telemetry_peer_udp_cache)))
		Log(LOG_WARNING, "WARN ( %s/%s ): tsearch() unable to insert in UDP peers cache.\n", config.name, t_data->log_str);
	    }
	  }

	  break;
	}
      }

      if (!peer) {
        /* We briefly accept the new connection to be able to drop it */
        Log(LOG_ERR, "ERROR ( %s/%s ): Insufficient number of telemetry peers has been configured by 'telemetry_daemon_max_peers' (%d).\n",
                        config.name, t_data->log_str, config.telemetry_max_peers);
        if (config.telemetry_port_tcp) close(fd);
        goto read_data;
      }

      peer->fd = fd;
      if (config.telemetry_port_tcp) FD_SET(peer->fd, &bkp_read_descs);
      peer->addr.family = ((struct sockaddr *)&client)->sa_family;
      if (peer->addr.family == AF_INET) {
        peer->addr.address.ipv4.s_addr = ((struct sockaddr_in *)&client)->sin_addr.s_addr;
        peer->tcp_port = ntohs(((struct sockaddr_in *)&client)->sin_port);
      }
#if defined ENABLE_IPV6
      else if (peer->addr.family == AF_INET6) {
        memcpy(&peer->addr.address.ipv6, &((struct sockaddr_in6 *)&client)->sin6_addr, 16);
        peer->tcp_port = ntohs(((struct sockaddr_in6 *)&client)->sin6_port);
      }
#endif
      addr_to_str(peer->addr_str, &peer->addr);

      if (telemetry_misc_db->msglog_backend_methods)
        telemetry_peer_log_init(peer, config.telemetry_msglog_output, FUNC_TYPE_TELEMETRY);

      if (telemetry_misc_db->dump_backend_methods)
        telemetry_dump_init_peer(peer);

      peers_num++;
      Log(LOG_INFO, "INFO ( %s/%s ): [%s] telemetry peers usage: %u/%u\n",
	  config.name, t_data->log_str, peer->addr_str, peers_num, config.telemetry_max_peers);
    }

    read_data:

    /*
       We have something coming in: let's lookup which peer is that.
       FvD: To avoid starvation of the "later established" peers, we
       offset the start of the search in a round-robin style.
    */
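    /* Illustrative example: with max_peers_idx = 4 and peers_idx_rr = 2 the scan
       below visits slots 2, 3, 0, 1; peers_idx_rr then advances by one so the
       next wakeup starts the scan at slot 3. */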
    if (config.telemetry_port_tcp) {
      for (peer = NULL, peers_idx = 0; peers_idx < max_peers_idx; peers_idx++) {
        int loc_idx = (peers_idx + peers_idx_rr) % max_peers_idx;

        if (telemetry_peers[loc_idx].fd && FD_ISSET(telemetry_peers[loc_idx].fd, &read_descs)) {
          peer = &telemetry_peers[loc_idx];

	  if (telemetry_is_zjson(decoder)) peer_z = &telemetry_peers_z[loc_idx];

          peers_idx_rr = (peers_idx_rr + 1) % max_peers_idx;
          break;
        }
      }
    }

    if (!peer) goto select_again;

    recv_flags = 0;

    switch (decoder) {
    case TELEMETRY_DECODER_JSON:
      ret = telemetry_recv_json(peer, 0, &recv_flags);
      data_decoder = TELEMETRY_DATA_DECODER_JSON;
      break;
    case TELEMETRY_DECODER_ZJSON:
      ret = telemetry_recv_zjson(peer, peer_z, 0, &recv_flags);
      data_decoder = TELEMETRY_DATA_DECODER_JSON;
      break;
    case TELEMETRY_DECODER_CISCO:
      ret = telemetry_recv_cisco(peer, &recv_flags, &data_decoder);
      break;
    case TELEMETRY_DECODER_CISCO_JSON:
      ret = telemetry_recv_cisco_json(peer, &recv_flags);
      data_decoder = TELEMETRY_DATA_DECODER_JSON;
      break;
    case TELEMETRY_DECODER_CISCO_ZJSON:
      ret = telemetry_recv_cisco_zjson(peer, peer_z, &recv_flags);
      data_decoder = TELEMETRY_DATA_DECODER_JSON;
      break;
    case TELEMETRY_DECODER_CISCO_GPB:
      ret = telemetry_recv_cisco_gpb(peer, &recv_flags);
      data_decoder = TELEMETRY_DATA_DECODER_GPB;
      break;
    case TELEMETRY_DECODER_CISCO_GPB_KV:
      ret = telemetry_recv_cisco_gpb_kv(peer, &recv_flags);
      data_decoder = TELEMETRY_DATA_DECODER_GPB;
      break;
    default:
      ret = TRUE; recv_flags = ERR;
      data_decoder = TELEMETRY_DATA_DECODER_UNKNOWN;
      break;
    }

    if (ret <= 0) {
      Log(LOG_INFO, "INFO ( %s/%s ): [%s] connection reset by peer (%d).\n", config.name, t_data->log_str, peer->addr_str, errno);
      FD_CLR(peer->fd, &bkp_read_descs);
      telemetry_peer_close(peer, FUNC_TYPE_TELEMETRY);
      if (telemetry_is_zjson(decoder)) telemetry_peer_z_close(peer_z);
      recalc_fds = TRUE;
    }
    else {
      if (recv_flags != ERR) telemetry_process_data(peer, t_data, data_decoder);
    }
  }
}
Example #2
0
void skinny_bmp_daemon()
{
  int slen, clen, ret, rc, peers_idx, allowed, yes=1, no=0;
  int peers_idx_rr = 0, max_peers_idx = 0;
  u_int32_t pkt_remaining_len=0;
  time_t now;
  afi_t afi;
  safi_t safi;

  struct bmp_peer *bmpp = NULL;
  struct bgp_peer *peer = NULL;

#if defined ENABLE_IPV6
  struct sockaddr_storage server, client;
#else
  struct sockaddr server, client;
#endif
  struct hosts_table allow;
  struct host_addr addr;
  struct bgp_peer_batch bp_batch;

  /* select() stuff */
  fd_set read_descs, bkp_read_descs;
  int fd, select_fd, bkp_select_fd, recalc_fds, select_num;

  /* logdump time management */
  time_t dump_refresh_deadline;
  struct timeval dump_refresh_timeout, *drt_ptr;


  /* initial cleanups */
  reload_map_bmp_thread = FALSE;
  reload_log_bmp_thread = FALSE;
  memset(&server, 0, sizeof(server));
  memset(&client, 0, sizeof(client));
  memset(&allow, 0, sizeof(struct hosts_table));
  clen = sizeof(client);

  bmp_routing_db = &inter_domain_routing_dbs[FUNC_TYPE_BMP];
  memset(bmp_routing_db, 0, sizeof(struct bgp_rt_structs));

  /* socket creation for BMP server: IPv6 if available, with IPv4 fallback */
#if (defined ENABLE_IPV6)
  if (!config.nfacctd_bmp_ip) {
    struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)&server;

    sa6->sin6_family = AF_INET6;
    sa6->sin6_port = htons(config.nfacctd_bmp_port);
    slen = sizeof(struct sockaddr_in6);
  }
#else
  if (!config.nfacctd_bmp_ip) {
    struct sockaddr_in *sa4 = (struct sockaddr_in *)&server;

    sa4->sin_family = AF_INET;
    sa4->sin_addr.s_addr = htonl(0);
    sa4->sin_port = htons(config.nfacctd_bmp_port);
    slen = sizeof(struct sockaddr_in);
  }
#endif
  else {
    trim_spaces(config.nfacctd_bmp_ip);
    ret = str_to_addr(config.nfacctd_bmp_ip, &addr);
    if (!ret) {
      Log(LOG_ERR, "ERROR ( %s/%s ): 'bmp_daemon_ip' value is not a valid IPv4/IPv6 address. Terminating thread.\n", config.name, bmp_misc_db->log_str);
      exit_all(1);
    }
    slen = addr_to_sa((struct sockaddr *)&server, &addr, config.nfacctd_bmp_port);
  }

  if (!config.nfacctd_bmp_max_peers) config.nfacctd_bmp_max_peers = BMP_MAX_PEERS_DEFAULT;
  Log(LOG_INFO, "INFO ( %s/%s ): maximum BMP peers allowed: %d\n", config.name, bmp_misc_db->log_str, config.nfacctd_bmp_max_peers);

  bmp_peers = malloc(config.nfacctd_bmp_max_peers*sizeof(struct bmp_peer));
  if (!bmp_peers) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() BMP peers structure. Terminating thread.\n", config.name, bmp_misc_db->log_str);
    exit_all(1);
  }
  memset(bmp_peers, 0, config.nfacctd_bmp_max_peers*sizeof(struct bmp_peer));

  if (config.nfacctd_bmp_msglog_file || config.nfacctd_bmp_msglog_amqp_routing_key || config.nfacctd_bmp_msglog_kafka_topic) {
    if (config.nfacctd_bmp_msglog_file) bmp_misc_db->msglog_backend_methods++;
    if (config.nfacctd_bmp_msglog_amqp_routing_key) bmp_misc_db->msglog_backend_methods++;
    if (config.nfacctd_bmp_msglog_kafka_topic) bmp_misc_db->msglog_backend_methods++;

    if (bmp_misc_db->msglog_backend_methods > 1) {
      Log(LOG_ERR, "ERROR ( %s/%s ): bmp_daemon_msglog_file, bmp_daemon_msglog_amqp_routing_key and bmp_daemon_msglog_kafka_topic are mutually exclusive. Terminating thread.\n", config.name, bmp_misc_db->log_str);
      exit_all(1);
    }
  }

  if (config.bmp_dump_file || config.bmp_dump_amqp_routing_key || config.bmp_dump_kafka_topic) {
    if (config.bmp_dump_file) bmp_misc_db->dump_backend_methods++;
    if (config.bmp_dump_amqp_routing_key) bmp_misc_db->dump_backend_methods++;
    if (config.bmp_dump_kafka_topic) bmp_misc_db->dump_backend_methods++;

    if (bmp_misc_db->dump_backend_methods > 1) {
      Log(LOG_ERR, "ERROR ( %s/%s ): bmp_dump_file, bmp_dump_amqp_routing_key and bmp_dump_kafka_topic are mutually exclusive. Terminating thread.\n", config.name, bmp_misc_db->log_str);
      exit_all(1);
    }
  }

  if (bmp_misc_db->msglog_backend_methods || bmp_misc_db->dump_backend_methods)
    bgp_peer_log_seq_init(&bmp_misc_db->log_seq);

  if (bmp_misc_db->msglog_backend_methods) {
    bmp_misc_db->peers_log = malloc(config.nfacctd_bmp_max_peers*sizeof(struct bgp_peer_log));
    if (!bmp_misc_db->peers_log) {
      Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() BMP peers log structure. Terminating thread.\n", config.name, bmp_misc_db->log_str);
      exit_all(1);
    }
    memset(bmp_misc_db->peers_log, 0, config.nfacctd_bmp_max_peers*sizeof(struct bgp_peer_log));

    if (config.nfacctd_bmp_msglog_amqp_routing_key) {
#ifdef WITH_RABBITMQ
      bmp_daemon_msglog_init_amqp_host();
      p_amqp_connect_to_publish(&bmp_daemon_msglog_amqp_host);

      if (!config.nfacctd_bmp_msglog_amqp_retry)
        config.nfacctd_bmp_msglog_amqp_retry = AMQP_DEFAULT_RETRY;
#else
      Log(LOG_WARNING, "WARN ( %s/%s ): p_amqp_connect_to_publish() not possible due to missing --enable-rabbitmq\n", config.name, bmp_misc_db->log_str);
#endif
    }

    if (config.nfacctd_bmp_msglog_kafka_topic) {
#ifdef WITH_KAFKA
      bmp_daemon_msglog_init_kafka_host();
#else
      Log(LOG_WARNING, "WARN ( %s/%s ): p_kafka_connect_to_produce() not possible due to missing --enable-kafka\n", config.name, bmp_misc_db->log_str);
#endif
    }
  }

  if (!config.bmp_table_attr_hash_buckets) config.bmp_table_attr_hash_buckets = HASHTABSIZE;
  bgp_attr_init(config.bmp_table_attr_hash_buckets, bmp_routing_db);

  if (!config.bmp_table_peer_buckets) config.bmp_table_peer_buckets = DEFAULT_BGP_INFO_HASH;
  if (!config.bmp_table_per_peer_buckets) config.bmp_table_per_peer_buckets = DEFAULT_BGP_INFO_PER_PEER_HASH;

  if (config.bmp_table_per_peer_hash == BGP_ASPATH_HASH_PATHID)
    bmp_route_info_modulo = bmp_route_info_modulo_pathid;
  else {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unknown 'bmp_table_per_peer_hash' value. Terminating thread.\n", config.name, bmp_misc_db->log_str);
    exit_all(1);
  }

  config.bmp_sock = socket(((struct sockaddr *)&server)->sa_family, SOCK_STREAM, 0);
  if (config.bmp_sock < 0) {
#if (defined ENABLE_IPV6)
    /* retry with IPv4 */
    if (!config.nfacctd_bmp_ip) {
      struct sockaddr_in *sa4 = (struct sockaddr_in *)&server;

      sa4->sin_family = AF_INET;
      sa4->sin_addr.s_addr = htonl(0);
      sa4->sin_port = htons(config.nfacctd_bmp_port);
      slen = sizeof(struct sockaddr_in);

      config.bmp_sock = socket(((struct sockaddr *)&server)->sa_family, SOCK_STREAM, 0);
    }
#endif

    if (config.bmp_sock < 0) {
      Log(LOG_ERR, "ERROR ( %s/%s ): thread socket() failed. Terminating thread.\n", config.name, bmp_misc_db->log_str);
      exit_all(1);
    }
  }
  if (config.nfacctd_bmp_ipprec) {
    int opt = config.nfacctd_bmp_ipprec << 5;

    rc = setsockopt(config.bmp_sock, IPPROTO_IP, IP_TOS, &opt, sizeof(opt));
    if (rc < 0) Log(LOG_WARNING, "WARN ( %s/%s ): setsockopt() failed for IP_TOS (errno: %d).\n", config.name, bmp_misc_db->log_str, errno);
  }

  rc = setsockopt(config.bmp_sock, SOL_SOCKET, SO_REUSEADDR, (char *)&yes, sizeof(yes));
  if (rc < 0) Log(LOG_WARNING, "WARN ( %s/%s ): setsockopt() failed for SO_REUSEADDR (errno: %d).\n", config.name, bmp_misc_db->log_str, errno);

#if (defined ENABLE_IPV6) && (defined IPV6_BINDV6ONLY)
  rc = setsockopt(config.bmp_sock, IPPROTO_IPV6, IPV6_BINDV6ONLY, (char *) &no, (socklen_t) sizeof(no));
  if (rc < 0) Log(LOG_WARNING, "WARN ( %s/%s ): setsockopt() failed for IPV6_BINDV6ONLY (errno: %d).\n", config.name, bmp_misc_db->log_str, errno);
#endif

  if (config.nfacctd_bmp_pipe_size) {
    int l = sizeof(config.nfacctd_bmp_pipe_size);
    int saved = 0, obtained = 0;

    getsockopt(config.bmp_sock, SOL_SOCKET, SO_RCVBUF, &saved, &l);
    Setsocksize(config.bmp_sock, SOL_SOCKET, SO_RCVBUF, &config.nfacctd_bmp_pipe_size, sizeof(config.nfacctd_bmp_pipe_size));
    getsockopt(config.bmp_sock, SOL_SOCKET, SO_RCVBUF, &obtained, &l);

    /* restore the original buffer only if the kernel gave us less than we had */
    if (obtained < saved) {
      Setsocksize(config.bmp_sock, SOL_SOCKET, SO_RCVBUF, &saved, l);
      getsockopt(config.bmp_sock, SOL_SOCKET, SO_RCVBUF, &obtained, &l);
    }
    Log(LOG_INFO, "INFO ( %s/%s ): bmp_daemon_pipe_size: obtained=%d target=%d.\n", config.name, bmp_misc_db->log_str, obtained, config.nfacctd_bmp_pipe_size);
  }

  rc = bind(config.bmp_sock, (struct sockaddr *) &server, slen);
  if (rc < 0) {
    char null_ip_address[] = "0.0.0.0";
    char *ip_address;

    ip_address = config.nfacctd_bmp_ip ? config.nfacctd_bmp_ip : null_ip_address;
    Log(LOG_ERR, "ERROR ( %s/%s ): bind() to ip=%s port=%d/tcp failed (errno: %d).\n", config.name, bmp_misc_db->log_str, ip_address, config.nfacctd_bmp_port, errno);
    exit_all(1);
  }

  rc = listen(config.bmp_sock, 1);
  if (rc < 0) {
    Log(LOG_ERR, "ERROR ( %s/%s ): listen() failed (errno: %d).\n", config.name, bmp_misc_db->log_str, errno);
    exit_all(1);
  }

  /* Preparing for synchronous I/O multiplexing */
  select_fd = 0;
  FD_ZERO(&bkp_read_descs);
  FD_SET(config.bmp_sock, &bkp_read_descs);

  {
    char srv_string[INET6_ADDRSTRLEN];
    struct host_addr srv_addr;
    u_int16_t srv_port;

    sa_to_addr((struct sockaddr *)&server, &srv_addr, &srv_port);
    addr_to_str(srv_string, &srv_addr);
    Log(LOG_INFO, "INFO ( %s/%s ): waiting for BMP data on %s:%u\n", config.name, bmp_misc_db->log_str, srv_string, srv_port);
  }

  /* Preparing ACL, if any */
  if (config.nfacctd_bmp_allow_file) load_allow_file(config.nfacctd_bmp_allow_file, &allow);

  /* Let's initialize clean shared RIB */
  for (afi = AFI_IP; afi < AFI_MAX; afi++) {
    for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) {
      bmp_routing_db->rib[afi][safi] = bgp_table_init(afi, safi);
    }
  }
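  /* one shared bgp_table per (AFI, SAFI) combination: every BMP peer handled by
     this thread feeds the same bmp_routing_db RIBs initialized here */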

  /* BMP peers batching checks */
  if ((config.nfacctd_bmp_batch && !config.nfacctd_bmp_batch_interval) ||
      (config.nfacctd_bmp_batch_interval && !config.nfacctd_bmp_batch)) {
    Log(LOG_WARNING, "WARN ( %s/%s ): 'bmp_daemon_batch' and 'bmp_daemon_batch_interval' are only effective if both are set; disabling batching.\n", config.name, bmp_misc_db->log_str);
    config.nfacctd_bmp_batch = 0;
    config.nfacctd_bmp_batch_interval = 0;
  }
  else bgp_batch_init(&bp_batch, config.nfacctd_bmp_batch, config.nfacctd_bmp_batch_interval);

  if (bmp_misc_db->msglog_backend_methods) {
#ifdef WITH_JANSSON
    if (!config.nfacctd_bmp_msglog_output) config.nfacctd_bmp_msglog_output = PRINT_OUTPUT_JSON;
#else
    Log(LOG_WARNING, "WARN ( %s/%s ): bmp_daemon_msglog_output set to json but will produce no output (missing --enable-jansson).\n", config.name, bmp_misc_db->log_str);
#endif
  }

  if (bmp_misc_db->dump_backend_methods) {
#ifdef WITH_JANSSON
    if (!config.bmp_dump_output) config.bmp_dump_output = PRINT_OUTPUT_JSON;
#else
    Log(LOG_WARNING, "WARN ( %s/%s ): bmp_table_dump_output set to json but will produce no output (missing --enable-jansson).\n", config.name, bmp_misc_db->log_str);
#endif
  }

  if (bmp_misc_db->dump_backend_methods) {
    char dump_roundoff[] = "m";
    time_t tmp_time;

    if (config.bmp_dump_refresh_time) {
      gettimeofday(&bmp_misc_db->log_tstamp, NULL);
      dump_refresh_deadline = bmp_misc_db->log_tstamp.tv_sec;
      tmp_time = roundoff_time(dump_refresh_deadline, dump_roundoff);
      while ((tmp_time+config.bmp_dump_refresh_time) < dump_refresh_deadline) {
        tmp_time += config.bmp_dump_refresh_time;
      }
      dump_refresh_deadline = tmp_time;
      dump_refresh_deadline += config.bmp_dump_refresh_time; /* it's a deadline not a basetime */
    }
    else {
      config.bmp_dump_file = NULL;
      bmp_misc_db->dump_backend_methods = FALSE;
      Log(LOG_WARNING, "WARN ( %s/%s ): Invalid 'bmp_dump_refresh_time'.\n", config.name, bmp_misc_db->log_str);
    }

    if (config.bmp_dump_amqp_routing_key) bmp_dump_init_amqp_host();
    if (config.bmp_dump_kafka_topic) bmp_dump_init_kafka_host();
  }

  select_fd = bkp_select_fd = (config.bmp_sock + 1);
  recalc_fds = FALSE;

  bmp_link_misc_structs(bmp_misc_db);

  for (;;) {
    select_again:

    if (recalc_fds) {
      select_fd = config.bmp_sock;
      max_peers_idx = -1; /* .. since valid indexes include 0 */

      for (peers_idx = 0; peers_idx < config.nfacctd_bmp_max_peers; peers_idx++) {
        if (select_fd < bmp_peers[peers_idx].self.fd) select_fd = bmp_peers[peers_idx].self.fd;
        if (bmp_peers[peers_idx].self.fd) max_peers_idx = peers_idx;
      }
      select_fd++;
      max_peers_idx++;

      bkp_select_fd = select_fd;
      recalc_fds = FALSE;
    }
    else select_fd = bkp_select_fd;

    memcpy(&read_descs, &bkp_read_descs, sizeof(bkp_read_descs));

    if (bmp_misc_db->dump_backend_methods) {
      int delta;

      calc_refresh_timeout_sec(dump_refresh_deadline, bmp_misc_db->log_tstamp.tv_sec, &delta);
      dump_refresh_timeout.tv_sec = delta;
      dump_refresh_timeout.tv_usec = 0;
      drt_ptr = &dump_refresh_timeout;
    }
    else drt_ptr = NULL;

    select_num = select(select_fd, &read_descs, NULL, NULL, drt_ptr);
    if (select_num < 0) goto select_again;

    if (reload_map_bmp_thread) {
      if (config.nfacctd_bmp_allow_file) load_allow_file(config.nfacctd_bmp_allow_file, &allow);

      reload_map_bmp_thread = FALSE;
    }

    if (reload_log_bmp_thread) {
      for (peers_idx = 0; peers_idx < config.nfacctd_bmp_max_peers; peers_idx++) {
        if (bmp_misc_db->peers_log[peers_idx].fd) {
          fclose(bmp_misc_db->peers_log[peers_idx].fd);
          bmp_misc_db->peers_log[peers_idx].fd = open_output_file(bmp_misc_db->peers_log[peers_idx].filename, "a", FALSE);
	  setlinebuf(bmp_misc_db->peers_log[peers_idx].fd);
        }
        else break;
      }

      reload_log_bmp_thread = FALSE;
    }

    if (bmp_misc_db->msglog_backend_methods || bmp_misc_db->dump_backend_methods) {
      gettimeofday(&bmp_misc_db->log_tstamp, NULL);
      compose_timestamp(bmp_misc_db->log_tstamp_str, SRVBUFLEN, &bmp_misc_db->log_tstamp, TRUE,
			config.timestamps_since_epoch, config.timestamps_rfc3339, config.timestamps_utc);

      if (bmp_misc_db->dump_backend_methods) {
        while (bmp_misc_db->log_tstamp.tv_sec > dump_refresh_deadline) {
          bmp_misc_db->dump.tstamp.tv_sec = dump_refresh_deadline;
          bmp_misc_db->dump.tstamp.tv_usec = 0;
          compose_timestamp(bmp_misc_db->dump.tstamp_str, SRVBUFLEN, &bmp_misc_db->dump.tstamp, FALSE,
			    config.timestamps_since_epoch, config.timestamps_rfc3339, config.timestamps_utc);
	  bmp_misc_db->dump.period = config.bmp_dump_refresh_time;

          bmp_handle_dump_event();
          dump_refresh_deadline += config.bmp_dump_refresh_time;
        }
      }

#ifdef WITH_RABBITMQ
      if (config.nfacctd_bmp_msglog_amqp_routing_key) {
        time_t last_fail = P_broker_timers_get_last_fail(&bmp_daemon_msglog_amqp_host.btimers);

        if (last_fail && ((last_fail + P_broker_timers_get_retry_interval(&bmp_daemon_msglog_amqp_host.btimers)) <= bmp_misc_db->log_tstamp.tv_sec)) {
          bmp_daemon_msglog_init_amqp_host();
          p_amqp_connect_to_publish(&bmp_daemon_msglog_amqp_host);
        }
      }
#endif

#ifdef WITH_KAFKA
      if (config.nfacctd_bmp_msglog_kafka_topic) {
        time_t last_fail = P_broker_timers_get_last_fail(&bmp_daemon_msglog_kafka_host.btimers);

        if (last_fail && ((last_fail + P_broker_timers_get_retry_interval(&bmp_daemon_msglog_kafka_host.btimers)) <= bmp_misc_db->log_tstamp.tv_sec))
          bmp_daemon_msglog_init_kafka_host();
      }
#endif
    }

    /* 
       If select_num == 0 then we got out of select() due to a timeout rather
       than because we had a message from a peer to handle. By now we did all
       routine checks and can happily return to select() again.
    */
    if (!select_num) goto select_again;

    /* New connection is coming in */
    if (FD_ISSET(config.bmp_sock, &read_descs)) {
      int peers_check_idx, peers_num;

      fd = accept(config.bmp_sock, (struct sockaddr *) &client, &clen);
      if (fd == ERR) goto read_data;

#if defined ENABLE_IPV6
      ipv4_mapped_to_ipv4(&client);
#endif

      /* If an ACL is defined, here we check against and enforce it */
      if (allow.num) allowed = check_allow(&allow, (struct sockaddr *)&client);
      else allowed = TRUE;

      if (!allowed) {
	close(fd);
	goto read_data;
      }

      for (peer = NULL, peers_idx = 0; peers_idx < config.nfacctd_bmp_max_peers; peers_idx++) {
        if (!bmp_peers[peers_idx].self.fd) {
          now = time(NULL);

          /*
             Admitted if:
             *  batching feature is disabled or
             *  we have room in the current batch or
             *  we can start a new batch 
          */
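          /* Illustrative example: with bmp_daemon_batch = 10 and
             bmp_daemon_batch_interval = 5, at most 10 new peers are admitted per
             5-second window; connections beyond that are briefly accepted and
             closed in the throttle branch below. */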
          if (bgp_batch_is_admitted(&bp_batch, now)) {
            peer = &bmp_peers[peers_idx].self;
	    bmpp = &bmp_peers[peers_idx];

            if (bmp_peer_init(bmpp, FUNC_TYPE_BMP)) {
	      peer = NULL;
	      bmpp = NULL;
	    }
            else recalc_fds = TRUE;

            log_notification_unset(&log_notifications.bgp_peers_throttling);

            if (bgp_batch_is_enabled(&bp_batch) && peer) {
              if (bgp_batch_is_expired(&bp_batch, now)) bgp_batch_reset(&bp_batch, now);
              if (bgp_batch_is_not_empty(&bp_batch)) bgp_batch_decrease_counter(&bp_batch);
            }

            break;
          }
          else { /* throttle */
            /* We briefly accept the new connection to be able to drop it */
            if (!log_notification_isset(&log_notifications.bmp_peers_throttling, now)) {
              Log(LOG_INFO, "INFO ( %s/%s ): throttling at BMP peer #%u\n", config.name, bmp_misc_db->log_str, peers_idx);
              log_notification_set(&log_notifications.bmp_peers_throttling, now, FALSE);
            }

            close(fd);
            goto read_data;
          }
        }
      }

      if (!peer) {
        /* We briefly accept the new connection to be able to drop it */
        Log(LOG_ERR, "ERROR ( %s/%s ): Insufficient number of BMP peers has been configured by 'bmp_daemon_max_peers' (%d).\n",
                        config.name, bmp_misc_db->log_str, config.nfacctd_bmp_max_peers);
        close(fd);
        goto read_data;
      }

      peer->fd = fd;
      FD_SET(peer->fd, &bkp_read_descs);
      peer->addr.family = ((struct sockaddr *)&client)->sa_family;
      if (peer->addr.family == AF_INET) {
        peer->addr.address.ipv4.s_addr = ((struct sockaddr_in *)&client)->sin_addr.s_addr;
        peer->tcp_port = ntohs(((struct sockaddr_in *)&client)->sin_port);
      }
#if defined ENABLE_IPV6
      else if (peer->addr.family == AF_INET6) {
        memcpy(&peer->addr.address.ipv6, &((struct sockaddr_in6 *)&client)->sin6_addr, 16);
        peer->tcp_port = ntohs(((struct sockaddr_in6 *)&client)->sin6_port);
      }
#endif
      addr_to_str(peer->addr_str, &peer->addr);
      memcpy(&peer->id, &peer->addr, sizeof(struct host_addr)); /* XXX: some inet_ntoa()'s could be around against peer->id */

      if (bmp_misc_db->msglog_backend_methods)
        bgp_peer_log_init(peer, config.nfacctd_bmp_msglog_output, FUNC_TYPE_BMP);

      if (bmp_misc_db->dump_backend_methods)
	bmp_dump_init_peer(peer);

      /* Check: multiple TCP connections per peer */
      for (peers_check_idx = 0, peers_num = 0; peers_check_idx < config.nfacctd_bmp_max_peers; peers_check_idx++) {
        if (peers_idx != peers_check_idx && !memcmp(&bmp_peers[peers_check_idx].self.addr, &peer->addr, sizeof(bmp_peers[peers_check_idx].self.addr))) {
	  if (bmp_misc_db->is_thread && !config.nfacctd_bgp_to_agent_map) {
            Log(LOG_WARNING, "WARN ( %s/%s ): [%s] Multiple connections from peer and no bgp_agent_map defined.\n",
                                config.name, bmp_misc_db->log_str, bmp_peers[peers_check_idx].self.addr_str);
	  }
        }
        else {
          if (bmp_peers[peers_check_idx].self.fd) peers_num++;
        }
      }

      Log(LOG_INFO, "INFO ( %s/%s ): [%s] BMP peers usage: %u/%u\n", config.name, bmp_misc_db->log_str, peer->addr_str, peers_num, config.nfacctd_bmp_max_peers);
    }

    read_data:

    /*
       We have something coming in: let's lookup which peer is that.
       FvD: To avoid starvation of the "later established" peers, we
       offset the start of the search in a round-robin style.
    */
    for (peer = NULL, peers_idx = 0; peers_idx < max_peers_idx; peers_idx++) {
      int loc_idx = (peers_idx + peers_idx_rr) % max_peers_idx;

      if (bmp_peers[loc_idx].self.fd && FD_ISSET(bmp_peers[loc_idx].self.fd, &read_descs)) {
        peer = &bmp_peers[loc_idx].self;
	bmpp = &bmp_peers[loc_idx];
        peers_idx_rr = (peers_idx_rr + 1) % max_peers_idx;
        break;
      }
    }

    if (!peer) goto select_again;

    ret = recv(peer->fd, &peer->buf.base[peer->buf.truncated_len], (peer->buf.len - peer->buf.truncated_len), 0);
    peer->msglen = (ret + peer->buf.truncated_len);

    if (ret <= 0) {
      Log(LOG_INFO, "INFO ( %s/%s ): [%s] BMP connection reset by peer (%d).\n", config.name, bmp_misc_db->log_str, peer->addr_str, errno);
      FD_CLR(peer->fd, &bkp_read_descs);
      bmp_peer_close(bmpp, FUNC_TYPE_BMP);
      recalc_fds = TRUE;
      goto select_again;
    }
    else {
      pkt_remaining_len = bmp_process_packet(peer->buf.base, peer->msglen, bmpp);

      /* handling offset for TCP segment reassembly */
      if (pkt_remaining_len) peer->buf.truncated_len = bmp_packet_adj_offset(peer->buf.base, peer->buf.len, peer->msglen,
									     pkt_remaining_len, peer->addr_str);
      else peer->buf.truncated_len = 0;
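      /* peer->buf.truncated_len bytes of a partial BMP message are kept at the
         head of the buffer; the recv() above reads into base[truncated_len] so
         the next pass completes that message instead of overwriting it. */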
    }
  }
}
Example #3
0
/* Functions */
void imt_plugin(int pipe_fd, struct configuration *cfgptr, void *ptr) 
{
  int maxqsize = (MAX_QUERIES*sizeof(struct pkt_primitives))+sizeof(struct query_header)+2;
  struct sockaddr cAddr;
  struct pkt_data *data;
  struct ports_table pt;
  unsigned char srvbuf[maxqsize];
  unsigned char *srvbufptr;
  struct query_header *qh;
  unsigned char *pipebuf;
  char path[] = "/tmp/collect.pipe";
  short int go_to_clear = FALSE;
  u_int32_t request, sz;
  struct ch_status *status = ((struct channels_list_entry *)ptr)->status;
  int datasize = ((struct channels_list_entry *)ptr)->datasize;
  pid_t core_pid = ((struct channels_list_entry *)ptr)->core_pid;
  struct extra_primitives extras;
  unsigned char *rgptr;
  int pollagain = 0;
  u_int32_t seq = 0;
  int rg_err_count = 0;
  int amqp_timeout = INT_MAX, ret;
  struct pkt_bgp_primitives *pbgp, empty_pbgp;
  struct pkt_nat_primitives *pnat, empty_pnat;
  struct pkt_mpls_primitives *pmpls, empty_pmpls;
  char *pcust, empty_pcust[] = "";
  struct pkt_vlen_hdr_primitives *pvlen, empty_pvlen;
  struct networks_file_data nfd;
  struct timeval select_timeout;
  struct primitives_ptrs prim_ptrs;
  struct plugins_list_entry *plugin_data = ((struct channels_list_entry *)ptr)->plugin;

  fd_set read_descs, bkp_read_descs; /* select() stuff */
  int select_fd, lock = FALSE;
  int cLen, num, sd, sd2;
  char *dataptr;

#ifdef WITH_RABBITMQ
  struct p_amqp_host *amqp_host = &((struct channels_list_entry *)ptr)->amqp_host;
#endif

  memcpy(&config, cfgptr, sizeof(struct configuration));
  memcpy(&extras, &((struct channels_list_entry *)ptr)->extras, sizeof(struct extra_primitives));
  recollect_pipe_memory(ptr);
  pm_setproctitle("%s [%s]", "IMT Plugin", config.name);

  if (config.proc_priority) {
    int ret;

    ret = setpriority(PRIO_PROCESS, 0, config.proc_priority);
    if (ret) Log(LOG_WARNING, "WARN ( %s/%s ): proc_priority failed (errno: %d)\n", config.name, config.type, errno);
    else Log(LOG_INFO, "INFO ( %s/%s ): proc_priority set to %d\n", config.name, config.type, getpriority(PRIO_PROCESS, 0));
  }

  if (config.pidfile) write_pid_file_plugin(config.pidfile, config.type, config.name);
  if (config.logfile) {
    fclose(config.logfile_fd);
    config.logfile_fd = open_output_file(config.logfile, "a", FALSE);
  }

  if (extras.off_pkt_vlen_hdr_primitives) {
    Log(LOG_ERR, "ERROR ( %s/%s ): variable-length primitives, ie. label, are not supported in IMT plugin. Exiting ..\n", config.name, config.type);
    exit_plugin(1);
  }

  reload_map = FALSE;
  status->wakeup = TRUE;

  /* a bunch of default definitions and post-checks */
  pipebuf = (unsigned char *) malloc(config.buffer_size);
  if (!pipebuf) {
    Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (pipebuf). Exiting ..\n", config.name, config.type);
    exit_plugin(1);
  }

  if (config.pipe_amqp) {
    plugin_pipe_amqp_compile_check();
#ifdef WITH_RABBITMQ
    pipe_fd = plugin_pipe_amqp_connect_to_consume(amqp_host, plugin_data);
#endif
  }
  else setnonblocking(pipe_fd);

  memset(pipebuf, 0, config.buffer_size);
  no_more_space = FALSE;

  if (config.what_to_count & (COUNT_SUM_HOST|COUNT_SUM_NET))
    imt_insert_func = sum_host_insert;
  else if (config.what_to_count & COUNT_SUM_PORT) imt_insert_func = sum_port_insert;
  else if (config.what_to_count & COUNT_SUM_AS) imt_insert_func = sum_as_insert;
#if defined (HAVE_L2)
  else if (config.what_to_count & COUNT_SUM_MAC) imt_insert_func = sum_mac_insert;
#endif
  else imt_insert_func = insert_accounting_structure;

  memset(&nt, 0, sizeof(nt));
  memset(&nc, 0, sizeof(nc));
  memset(&pt, 0, sizeof(pt));

  load_networks(config.networks_file, &nt, &nc);
  set_net_funcs(&nt);

  if (config.ports_file) load_ports(config.ports_file, &pt);
  if (config.pkt_len_distrib_bins_str) load_pkt_len_distrib_bins();
  else {
    if (config.what_to_count_2 & COUNT_PKT_LEN_DISTRIB) {
      Log(LOG_ERR, "ERROR ( %s/%s ): 'aggregate' contains pkt_len_distrib but no 'pkt_len_distrib_bins' defined. Exiting.\n", config.name, config.type);
      exit_plugin(1); 
    }
  }

  if (!config.num_memory_pools) config.num_memory_pools = NUM_MEMORY_POOLS;
  if (!config.memory_pool_size) config.memory_pool_size = MEMORY_POOL_SIZE;  
  else {
    if (config.memory_pool_size < sizeof(struct acc)) {
      Log(LOG_WARNING, "WARN ( %s/%s ): enforcing memory pool's minimum size, %d bytes.\n", config.name, config.type, (int) sizeof(struct acc));
      config.memory_pool_size = MEMORY_POOL_SIZE;
    }
  }

  if (!config.imt_plugin_path) config.imt_plugin_path = path; 
  if (!config.buckets) config.buckets = MAX_HOSTS;

  init_memory_pool_table(config);
  if (mpd == NULL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): unable to allocate memory pools table\n", config.name, config.type);
    exit_plugin(1);
  }

  current_pool = request_memory_pool(config.buckets*sizeof(struct acc));
  if (current_pool == NULL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): unable to allocate first memory pool, try with larger value.\n", config.name, config.type);
    exit_plugin(1);
  }
  a = current_pool->base_ptr;
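  /* the first pool is sized to hold the bucket array 'a' itself
     (config.buckets * sizeof(struct acc)); the pool requested below, and any
     further ones, provide storage for additional accounting entries */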

  lru_elem_ptr = malloc(config.buckets*sizeof(struct acc *));
  if (lru_elem_ptr == NULL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): unable to allocate LRU element pointers.\n", config.name, config.type);
    exit_plugin(1);
  }
  else memset(lru_elem_ptr, 0, config.buckets*sizeof(struct acc *));

  current_pool = request_memory_pool(config.memory_pool_size);
  if (current_pool == NULL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): unable to allocate more memory pools, try with larger value.\n", config.name, config.type);
    exit_plugin(1);
  }

  signal(SIGHUP, reload); /* handles reopening of syslog channel */
  signal(SIGINT, exit_now); /* exit lane */
  signal(SIGUSR1, SIG_IGN);
  signal(SIGUSR2, reload_maps);
  signal(SIGPIPE, SIG_IGN);
  signal(SIGCHLD, SIG_IGN); 

  memset(&empty_pbgp, 0, sizeof(empty_pbgp));
  memset(&empty_pnat, 0, sizeof(empty_pnat));
  memset(&empty_pmpls, 0, sizeof(empty_pmpls));
  memset(&empty_pvlen, 0, sizeof(empty_pvlen));

  memset(&table_reset_stamp, 0, sizeof(table_reset_stamp));

  /* building a server for interrogations by clients */
  sd = build_query_server(config.imt_plugin_path);
  cLen = sizeof(cAddr);
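  /* build_query_server() is expected to return a listening UNIX-domain socket
     bound to config.imt_plugin_path (default: /tmp/collect.pipe); memory client
     queries arriving there are served in the loop below */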

  /* preparing for synchronous I/O multiplexing */
  select_fd = 0;

  FD_ZERO(&read_descs);
  FD_SET(sd, &read_descs);

  if (sd > select_fd) select_fd = sd;
  if (pipe_fd != ERR) {
    FD_SET(pipe_fd, &read_descs);
    if (pipe_fd > select_fd) select_fd = pipe_fd;
  }

  select_fd++;
  memcpy(&bkp_read_descs, &read_descs, sizeof(read_descs));

  qh = (struct query_header *) srvbuf;

  /* plugin main loop */
  for(;;) {
    select_again:
    select_timeout.tv_sec = MIN(DEFAULT_IMT_PLUGIN_SELECT_TIMEOUT, amqp_timeout);
    select_timeout.tv_usec = 0;

    memcpy(&read_descs, &bkp_read_descs, sizeof(bkp_read_descs));
    num = select(select_fd, &read_descs, NULL, NULL, &select_timeout);

    gettimeofday(&cycle_stamp, NULL);

#ifdef WITH_RABBITMQ
    if (config.pipe_amqp && pipe_fd == ERR) {
      if (select_timeout.tv_sec == amqp_timeout) {
        pipe_fd = plugin_pipe_amqp_connect_to_consume(amqp_host, plugin_data);

        if (pipe_fd != ERR) {
          FD_SET(pipe_fd, &bkp_read_descs);
          if (pipe_fd > select_fd) select_fd = pipe_fd;
          select_fd++;
	  amqp_timeout = LONGLONG_RETRY;
        }
	else amqp_timeout = P_broker_timers_get_retry_interval(&amqp_host->btimers);
      }
      else {
        amqp_timeout = ((P_broker_timers_get_last_fail(&amqp_host->btimers) + P_broker_timers_get_retry_interval(&amqp_host->btimers)) - cycle_stamp.tv_sec);
        assert(amqp_timeout >= 0);
      }
    }
#endif

    if (num <= 0) {
      if (getppid() == 1) {
	Log(LOG_ERR, "ERROR ( %s/%s ): Core process *seems* gone. Exiting.\n", config.name, config.type);
	exit_plugin(1);
      } 

      goto select_again;  
    }

    /* doing server tasks */
    if (FD_ISSET(sd, &read_descs)) {
      struct pollfd pfd;
      int ret;

      sd2 = accept(sd, &cAddr, &cLen);
      setblocking(sd2);
      srvbufptr = srvbuf;
      sz = maxqsize;

      pfd.fd = sd2;
      pfd.events = POLLIN;

      recv_again:
      ret = poll(&pfd, 1, 1000);
      if (ret == 0) {
        Log(LOG_WARNING, "WARN ( %s/%s ): Timed out while processing fragmented query.\n", config.name, config.type); 
        close(sd2);
	goto select_again;
      }
      else {
        num = recv(sd2, srvbufptr, sz, 0);
        if (num <= 0) {
          Log(LOG_DEBUG, "DEBUG ( %s/%s ): recv() returned %d while reading the query. Closing connection.\n", config.name, config.type, num);
          close(sd2);
          goto select_again;
        }
        if (srvbufptr[num-1] != '\x4') {
	  srvbufptr += num;
	  sz -= num;
	  goto recv_again; /* fragmented query */
        }
      }

      num = num+(maxqsize-sz);

      if (qh->num > MAX_QUERIES) {
	Log(LOG_DEBUG, "DEBUG ( %s/%s ): request discarded. Too many queries.\n", config.name, config.type);
	close(sd2);
	continue;
      }

      request = qh->type;
      if (request & WANT_RESET) request ^= WANT_RESET;
      if (request & WANT_LOCK_OP) {
	lock = TRUE;
	request ^= WANT_LOCK_OP;
      }

      /* 
	 - if explicitly required, we do not fork: the query obtains exclusive
	   control - a lock - over the memory table;
	 - operations that may cause inconsistencies (full erasure, counter
	   reset for individual entries, etc.) require such an exclusive lock;
	 - if the query is just a matter of a single short-lived walk through
	   the table, we also avoid fork(): the plugin serves the request itself;
         - in all other cases we fork; the newly created child serves the
	   query asynchronously.
      */
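      /* Mapping of the policy above onto the branches below: WANT_ERASE and
	 lock-flagged requests are served in-line; so are single-entry
	 WANT_COUNTER / WANT_MATCH queries and the class / pkt_len_distrib table
	 requests; everything else is handed to a fork()ed child. */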

      if (request & WANT_ERASE) {
	request ^= WANT_ERASE;
	if (request) {
	  if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, FALSE);
	  else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. Errno: %d\n", config.name, config.type, num, errno);
	}
	Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
	go_to_clear = TRUE;  
      }
      else if (((request == WANT_COUNTER) || (request == WANT_MATCH)) &&
	(qh->num == 1) && (qh->what_to_count == config.what_to_count)) {
	if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, FALSE);
        else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. ERRNO: %d\n", config.name, config.type, num, errno);
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
      } 
      else if (request == WANT_CLASS_TABLE) {
	if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, FALSE);
        else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. ERRNO: %d\n", config.name, config.type, num, errno);
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
      }
      else if (request == WANT_PKT_LEN_DISTRIB_TABLE) {
        if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, FALSE);
        else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. ERRNO: %d\n", config.name, config.type, num, errno);
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
      }
      else {
	if (lock) {
	  if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, FALSE);
          else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. Errno: %d\n", config.name, config.type, num, errno);
          Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
	}
	else { 
          switch (fork()) {
	  case -1: /* Something went wrong */
	    Log(LOG_WARNING, "WARN ( %s/%s ): Unable to serve client query: %s\n", config.name, config.type, strerror(errno));
	    break;
          case 0: /* Child */
            close(sd);
	    pm_setproctitle("%s [%s]", "IMT Plugin -- serving client", config.name);
            if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, TRUE);
	    else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. Errno: %d\n", config.name, config.type, num, errno);
            Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
            close(sd2);
            exit(0);
          default: /* Parent */
            break;
          } 
	}
      }
      close(sd2);
    }

    /* clearing stats if requested */
    if (go_to_clear) {
      /* When using extended BGP features we need to
	 free() up memory allocations before erasing */ 
      /* XXX: given the current use of empty_* vars we have always to
         free_extra_allocs() in order to prevent memory leaks */
      /*
      if (extras.off_pkt_bgp_primitives || extras.off_pkt_nat_primitives ||
	  extras.off_pkt_mpls_primitives || extras.off_custom_primitives)
      */
	free_extra_allocs(); 
      clear_memory_pool_table();
      current_pool = request_memory_pool(config.buckets*sizeof(struct acc));
      if (current_pool == NULL) {
        Log(LOG_ERR, "ERROR ( %s/%s ): Cannot allocate my first memory pool, try with larger value.\n", config.name, config.type);
        exit_plugin(1);
      }
      a = current_pool->base_ptr;

      current_pool = request_memory_pool(config.memory_pool_size);
      if (current_pool == NULL) {
        Log(LOG_ERR, "ERROR ( %s/%s ): Cannot allocate more memory pools, try with larger value.\n", config.name, config.type);
        exit_plugin(1);
      }
      go_to_clear = FALSE;
      no_more_space = FALSE;
      memcpy(&table_reset_stamp, &cycle_stamp, sizeof(struct timeval));
    }

    if (pipe_fd != ERR && FD_ISSET(pipe_fd, &read_descs)) {
      if (!config.pipe_amqp) {
        if (!pollagain) {
          seq++;
          seq %= MAX_SEQNUM;
        }

        pollagain = FALSE;
        if ((num = read(pipe_fd, &rgptr, sizeof(rgptr))) == 0)
          exit_plugin(1); /* we exit silently; something happened at the write end */

        if (num < 0) {
          pollagain = TRUE;
          goto select_again;
        }
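        /* each buffer handed over by the core process carries a sequence number
           in its ch_buf_hdr; the plugin mirrors it in 'seq', so a mismatch below
           means buffers were lost (overwritten before this plugin could read
           them), hence the plugin_buffer_size / plugin_pipe_size warning. */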

        memcpy(pipebuf, rgptr, config.buffer_size);
        if (((struct ch_buf_hdr *)pipebuf)->seq != seq) {
          rg_err_count++;
          if (config.debug || (rg_err_count > MAX_RG_COUNT_ERR)) {
	    Log(LOG_WARNING, "WARN ( %s/%s ): Missing data detected (plugin_buffer_size=%llu plugin_pipe_size=%llu).\n",
		config.name, config.type, config.buffer_size, config.pipe_size);
	    Log(LOG_WARNING, "WARN ( %s/%s ): Increase values or look for plugin_buffer_size, plugin_pipe_size in CONFIG-KEYS document.\n\n",
		config.name, config.type);
            seq = ((struct ch_buf_hdr *)pipebuf)->seq;
	  }
	}
      }
#ifdef WITH_RABBITMQ
      else {
        ret = p_amqp_consume_binary(amqp_host, pipebuf, config.buffer_size);
        if (!ret) {
          seq = ((struct ch_buf_hdr *)pipebuf)->seq;
	  amqp_timeout = LONGLONG_RETRY;
	  num = TRUE;
	}
	else {
          if (pipe_fd != ERR) {
            FD_CLR(pipe_fd, &bkp_read_descs);
	    pipe_fd = ERR;
          }
	  amqp_timeout = P_broker_timers_get_retry_interval(&amqp_host->btimers);
	}
      }
#endif

      if (num > 0) {
	data = (struct pkt_data *) (pipebuf+sizeof(struct ch_buf_hdr));

	if (config.debug_internal_msg) 
	  Log(LOG_DEBUG, "DEBUG ( %s/%s ): buffer received cpid=%u seq=%u num_entries=%u\n",
		config.name, config.type, core_pid, seq, ((struct ch_buf_hdr *)pipebuf)->num);

	if (!config.pipe_check_core_pid || ((struct ch_buf_hdr *)pipebuf)->core_pid == core_pid) {
	while (((struct ch_buf_hdr *)pipebuf)->num > 0) {

	  // XXX: to be optimized: remove empty_* vars
          if (extras.off_pkt_bgp_primitives)
	    pbgp = (struct pkt_bgp_primitives *) ((u_char *)data + extras.off_pkt_bgp_primitives);
	  else pbgp = &empty_pbgp;
          if (extras.off_pkt_nat_primitives) 
            pnat = (struct pkt_nat_primitives *) ((u_char *)data + extras.off_pkt_nat_primitives);
          else pnat = &empty_pnat;
          if (extras.off_pkt_mpls_primitives) 
            pmpls = (struct pkt_mpls_primitives *) ((u_char *)data + extras.off_pkt_mpls_primitives);
          else pmpls = &empty_pmpls;
          if (extras.off_custom_primitives)
	    pcust = ((u_char *)data + extras.off_custom_primitives);
          else pcust = empty_pcust;
	  if (extras.off_pkt_vlen_hdr_primitives)
	    pvlen = (struct pkt_vlen_hdr_primitives *) ((u_char *)data + extras.off_pkt_vlen_hdr_primitives); 
	  else pvlen = &empty_pvlen;

	  for (num = 0; net_funcs[num]; num++)
	    (*net_funcs[num])(&nt, &nc, &data->primitives, pbgp, &nfd);

	  if (config.ports_file) {
	    if (!pt.table[data->primitives.src_port]) data->primitives.src_port = 0;
	    if (!pt.table[data->primitives.dst_port]) data->primitives.dst_port = 0;
	  }

	  if (config.pkt_len_distrib_bins_str &&
	      config.what_to_count_2 & COUNT_PKT_LEN_DISTRIB)
	    evaluate_pkt_len_distrib(data);

	  prim_ptrs.data = data; 
	  prim_ptrs.pbgp = pbgp; 
	  prim_ptrs.pnat = pnat;
	  prim_ptrs.pmpls = pmpls;
	  prim_ptrs.pcust = pcust;
	  prim_ptrs.pvlen = pvlen;
	  
          (*imt_insert_func)(&prim_ptrs);

	  ((struct ch_buf_hdr *)pipebuf)->num--;
	  if (((struct ch_buf_hdr *)pipebuf)->num) {
            dataptr = (unsigned char *) data;
            dataptr += datasize;
            data = (struct pkt_data *) dataptr;
	  }
        }
	}
      }
    } 

    if (reload_map) {
      load_networks(config.networks_file, &nt, &nc);
      load_ports(config.ports_file, &pt);
      reload_map = FALSE;
    }
  }
}