Example #1
void Tee_init_socks()
{
  struct tee_receiver *target = NULL;
  struct sockaddr *sa;
  int pool_idx, recv_idx, err;
  char dest_addr[256], dest_serv[256];

  for (pool_idx = 0; pool_idx < receivers.num; pool_idx++) {
    for (recv_idx = 0; recv_idx < receivers.pools[pool_idx].num; recv_idx++) {
      target = &receivers.pools[pool_idx].receivers[recv_idx];
      sa = (struct sockaddr *) &target->dest;

      if (sa->sa_family != 0) {
        /* getnameinfo() returns a nonzero EAI_* code on failure, not -1 */
        if ((err = getnameinfo(sa, target->dest_len, dest_addr, sizeof(dest_addr),
            dest_serv, sizeof(dest_serv), NI_NUMERICHOST)) != 0) {
          Log(LOG_ERR, "ERROR ( %s/%s ): getnameinfo: %d\n", config.name, config.type, err);
          exit_plugin(1);
        }
      }

      target->fd = Tee_prepare_sock((struct sockaddr *) &target->dest, target->dest_len);

      if (config.debug) {
        struct host_addr recv_addr;
        u_char recv_addr_str[INET6_ADDRSTRLEN];
        u_int16_t recv_port;

        sa_to_addr(&target->dest, &recv_addr, &recv_port);
        addr_to_str(recv_addr_str, &recv_addr);
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): pool ID: %u :: receiver: %s :: fd: %d.\n",
                config.name, config.type, receivers.pools[pool_idx].id, recv_addr_str, target->fd);
      }
    }
  }
}
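A note on the check fixed above: getnameinfo() and getaddrinfo() signal failure with a nonzero EAI_* return code, not -1. A minimal standalone sketch of the conventional handling, using gai_strerror() for a readable message:

#include <sys/socket.h>
#include <netdb.h>
#include <stdio.h>

/* Sketch: turn a sockaddr back into a numeric host string. */
int print_numeric_host(const struct sockaddr *sa, socklen_t salen)
{
  char host[NI_MAXHOST];
  int err = getnameinfo(sa, salen, host, sizeof(host), NULL, 0, NI_NUMERICHOST);

  if (err != 0) { /* nonzero EAI_* code, not -1 */
    fprintf(stderr, "getnameinfo: %s\n", gai_strerror(err));
    return -1;
  }
  printf("%s\n", host);
  return 0;
}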
Example #2
File: tee_plugin.c Project: NeilW/pmacct
// XXX: duplicate function
void Tee_parse_hostport(const char *s, struct sockaddr *addr, socklen_t *len)
{
        char *orig, *host, *port;
        struct addrinfo hints, *res;
        int herr;

        if ((host = orig = strdup(s)) == NULL) {
                fprintf(stderr, "Out of memory\n");
                exit_plugin(1);
        }

        trim_spaces(host);
        trim_spaces(orig);

        if ((port = strrchr(host, ':')) == NULL ||
            *(++port) == '\0' || *host == '\0') {
                fprintf(stderr, "Invalid -n argument.\n");
                exit_plugin(1);
        }
        *(port - 1) = '\0';

        /* Accept [host]:port for numeric IPv6 addresses */
        if (*host == '[' && *(port - 2) == ']') {
                host++;
                *(port - 2) = '\0';
        }

        memset(&hints, '\0', sizeof(hints));
        hints.ai_socktype = SOCK_DGRAM;
        /* getaddrinfo() returns a nonzero EAI_* code on failure, not -1 */
        if ((herr = getaddrinfo(host, port, &hints, &res)) != 0) {
                fprintf(stderr, "Address lookup failed: %s\n",
                    gai_strerror(herr));
                exit_plugin(1);
        }
        if (res == NULL || res->ai_addr == NULL) {
                fprintf(stderr, "No addresses found for [%s]:%s\n", host, port);
                exit_plugin(1);
        }
        if (res->ai_addrlen > *len) {
                Log(LOG_ERR, "ERROR ( %s/%s ): Address too long.\n", config.name, config.type);
                exit_plugin(1);
        }
        memcpy(addr, res->ai_addr, res->ai_addrlen);
        *len = res->ai_addrlen;
        freeaddrinfo(res); /* release the addrinfo list once copied out */
        free(orig);
}
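For reference, the split above takes the last ':' (strrchr()) so that numeric IPv6 addresses can be passed in brackets. Illustrative inputs and the host/port strings they yield:

  "10.0.0.1:2100"       ->  host "10.0.0.1",     port "2100"
  "[2001:db8::1]:2100"  ->  host "2001:db8::1",  port "2100"  (brackets stripped)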
Example #3
void MY_create_backend(struct DBdesc *db)
{
  db->desc = malloc(sizeof(MYSQL));
  if (!db->desc) {
    Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (MY_create_backend). Exiting ..\n", config.name, config.type);
    exit_plugin(1);
  }
  memset(db->desc, 0, sizeof(MYSQL));
}
Example #4
void load_ports(char *filename, struct ports_table *pt)
{
  FILE *file;
  char buf[8];
  int ret, rows = 0, newline = TRUE;
  struct stat st;

  memset(&st, 0, sizeof(st));

  if (filename) {
    if ((file = fopen(filename,"r")) == NULL) {
      Log(LOG_ERR, "ERROR ( %s/%s ): [%s] file not found.\n", config.name, config.type, filename);
      goto handle_error;
    }
    else {
      memset(pt, 0, sizeof(struct ports_table));

      /* fgets()-driven loop avoids the while(!feof()) anti-pattern; `rows'
         counts physical lines so diagnostics can point at the right one */
      while (fgets(buf, sizeof(buf), file)) {
        if (strchr(buf, '\n')) {
          rows++;
          if (!newline) {
            /* tail of an overlong line: resync and skip it */
            newline = TRUE;
            continue;
          }
        }
        else {
          if (!newline) continue; /* middle chunk of an overlong line */
          newline = FALSE;
        }
        trim_spaces(buf);
        if (!strlen(buf) || (buf[0] == '!')) continue; /* blanks and '!' comments */
        ret = atoi(buf);
        if ((ret > 0) && (ret < PORTS_TABLE_ENTRIES)) pt->table[ret] = TRUE;
        else Log(LOG_WARNING, "WARN ( %s/%s ): [%s:%u] invalid port specified.\n", config.name, config.type, filename, rows);
      }
      fclose(file);

      stat(filename, &st);
      pt->timestamp = st.st_mtime;
    }
  }

  return;

  handle_error:
  if (pt->timestamp) {
    Log(LOG_WARNING, "WARN ( %s/%s ): [%s] Rolling back the old Ports Table.\n", config.name, config.type, filename);

    /* we update the timestamp to avoid loops */
    stat(filename, &st);
    pt->timestamp = st.st_mtime;
  }
  else exit_plugin(1);
}
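As the parser above implies, a ports file holds one port number per line; surrounding whitespace is trimmed and lines starting with '!' are comments. A hypothetical ports_file:

  ! allowed ports, one per line
  80
  443
  8080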
Example #5
File: tee_plugin.c Project: NeilW/pmacct
int Tee_prepare_sock(struct sockaddr *addr, socklen_t len)
{
  int s, ret = 0;

  if (!config.tee_transparent) {
    struct host_addr source_ip;
    /* NB: plain struct sockaddr only fits an IPv4 address; a struct
       sockaddr_storage would be needed for IPv6 (compare Example #10) */
    struct sockaddr ssource_ip;

    if (config.nfprobe_source_ip) {
      ret = str_to_addr(config.nfprobe_source_ip, &source_ip);
      addr_to_sa(&ssource_ip, &source_ip, 0);
    }

    if ((s = socket(addr->sa_family, SOCK_DGRAM, 0)) == -1) {
      Log(LOG_ERR, "ERROR ( %s/%s ): socket() error: %s\n", config.name, config.type, strerror(errno));
      exit_plugin(1);
    }

    if (ret && bind(s, (struct sockaddr *) &ssource_ip, sizeof(ssource_ip)) == -1)
      Log(LOG_ERR, "ERROR ( %s/%s ): bind() error: %s\n", config.name, config.type, strerror(errno));
  }
  else {
    if ((s = socket(addr->sa_family, SOCK_RAW, IPPROTO_RAW)) == -1) {
      Log(LOG_ERR, "ERROR ( %s/%s ): socket() error: %s\n", config.name, config.type, strerror(errno));
      exit_plugin(1);
    }
  }

  /* XXX: SNDBUF tuning? */

  if (connect(s, (struct sockaddr *)addr, len) == -1) {
    Log(LOG_ERR, "ERROR ( %s/%s ): connect() error: %s\n", config.name, config.type, strerror(errno));
    exit_plugin(1);
  }

  return(s);
}
Example #6
void kafka_avro_schema_purge(char *avro_schema_str)
{
  struct p_kafka_host kafka_avro_schema_host;
  int part, part_cnt, tpc;

  if (!avro_schema_str || !config.kafka_avro_schema_topic) return;

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  p_kafka_init_host(&kafka_avro_schema_host, config.kafka_config_file);
  p_kafka_connect_to_produce(&kafka_avro_schema_host);
  p_kafka_set_broker(&kafka_avro_schema_host, config.sql_host, config.kafka_broker_port);
  p_kafka_set_topic(&kafka_avro_schema_host, config.kafka_avro_schema_topic);
  p_kafka_set_partition(&kafka_avro_schema_host, config.kafka_partition);
  p_kafka_set_key(&kafka_avro_schema_host, config.kafka_partition_key, config.kafka_partition_keylen);
  p_kafka_set_content_type(&kafka_avro_schema_host, PM_KAFKA_CNT_TYPE_STR);

  if (config.kafka_partition_dynamic) {
    rd_kafka_resp_err_t err;
    const struct rd_kafka_metadata *metadata;

    err = rd_kafka_metadata(kafka_avro_schema_host.rk, 0, kafka_avro_schema_host.topic, &metadata, 100);
    if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
      Log(LOG_ERR, "ERROR ( %s/%s ): Unable to get Kafka metadata for topic %s: %s\n",
              config.name, config.type, kafka_avro_schema_host.topic, rd_kafka_err2str(err));
      exit_plugin(1);
    }

    part_cnt = -1;
    for (tpc = 0 ; tpc < metadata->topic_cnt ; tpc++) {
      const struct rd_kafka_metadata_topic *t = &metadata->topics[tpc];
      if (!strcmp(t->topic, config.kafka_avro_schema_topic)) {
          part_cnt = t->partition_cnt;
          break;
      }
    }
    rd_kafka_metadata_destroy(metadata); /* release the metadata once part_cnt is known */

    for(part = 0; part < part_cnt; part++) {
      p_kafka_produce_data_to_part(&kafka_avro_schema_host, avro_schema_str, strlen(avro_schema_str), part);
    }
  }
  else {
    p_kafka_produce_data(&kafka_avro_schema_host, avro_schema_str, strlen(avro_schema_str));
  }

  p_kafka_close(&kafka_avro_schema_host, FALSE);
}
Example #7
void PG_compose_conn_string(struct DBdesc *db, char *host)
{
  char *string;
  int slen = SRVBUFLEN;
  
  if (!db->conn_string) {
    db->conn_string = (char *) malloc(slen);
    if (!db->conn_string) {
      Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (PG_compose_conn_string). Exiting ..\n", config.name, config.type);
      exit_plugin(1);
    }
    string = db->conn_string;

    snprintf(string, slen, "dbname=%s user=%s password=%s", config.sql_db, config.sql_user, config.sql_passwd);
    slen -= strlen(string);
    string += strlen(string);

    if (host) snprintf(string, slen, " host=%s", host);
  }
}
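The clause is assembled with the common snprintf()-and-advance idiom: write, shrink the remaining length by strlen() of what was written, move the cursor, repeat. A minimal standalone sketch of the same pattern (values are illustrative):

#include <stdio.h>
#include <string.h>

void build_conn_string(char *buf, int len)
{
  char *p = buf;

  snprintf(p, len, "dbname=%s user=%s", "pmacct", "pmacct");
  len -= strlen(p);  /* shrink the remaining space ... */
  p += strlen(p);    /* ... and advance past what was written */

  snprintf(p, len, " host=%s", "127.0.0.1");
}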
Example #8
int Tee_parse_hostport(const char *s, struct sockaddr *addr, socklen_t *len)
{
  char *orig, *host, *port;
  struct addrinfo hints, *res;
  int herr;

  memset(&hints, '\0', sizeof(hints));

  if ((host = orig = strdup(s)) == NULL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Tee_parse_hostport() out of memory. Exiting ..\n", config.name, config.type);
    exit_plugin(1);
  }

  trim_spaces(host);
  trim_spaces(orig);

  if ((port = strrchr(host, ':')) == NULL || *(++port) == '\0' || *host == '\0') {
    free(orig); /* don't leak the strdup()'d copy on the early return */
    return TRUE;
  }

  *(port - 1) = '\0';

  /* Accept [host]:port for numeric IPv6 addresses */
  if (*host == '[' && *(port - 2) == ']') {
    host++;
    *(port - 2) = '\0';
    hints.ai_family = AF_INET6;
  }

  hints.ai_socktype = SOCK_DGRAM;

  /* Validations; getaddrinfo() returns a nonzero EAI_* code on failure, not -1.
     Error paths release res/orig to avoid leaking them. */
  if ((herr = getaddrinfo(host, port, &hints, &res)) != 0) { free(orig); return TRUE; }
  if (res == NULL || res->ai_addr == NULL) { if (res) freeaddrinfo(res); free(orig); return TRUE; }
  if (res->ai_addrlen > *len) { freeaddrinfo(res); free(orig); return TRUE; }

  memcpy(addr, res->ai_addr, res->ai_addrlen);
  *len = res->ai_addrlen;
  freeaddrinfo(res); /* release the addrinfo list once copied out */
  free(orig);

  return FALSE;
}
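A sketch of how a caller might use this variant (receiver string and variable names are illustrative); a nonzero (TRUE) return means the string failed to parse or resolve:

  struct sockaddr_storage dest;
  socklen_t dest_len = sizeof(dest);

  memset(&dest, 0, sizeof(dest));
  if (Tee_parse_hostport("192.0.2.1:2100", (struct sockaddr *) &dest, &dest_len)) {
    Log(LOG_ERR, "ERROR ( %s/%s ): invalid receiver.\n", config.name, config.type);
    exit_plugin(1);
  }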
Example #9
int main(const int argc, char *const argv[])
{
	int c;
	long rc;
	key_serial_t key = 0;
	char *buf;
	unsigned int timeout = 600; /* default idmap cache timeout */

	openlog(prog, 0, LOG_DAEMON);

	while ((c = getopt_long(argc, argv, "ht:v",
					long_options, NULL)) != -1) {
		switch (c) {
		case 'h':
			rc = 0;
			usage();
			goto out;
		case 't':
			rc = str_to_uint(optarg, &timeout);
			if (rc) {
				syslog(LOG_ERR, "bad timeout value %s: %s",
					optarg, strerror(rc));
				goto out;
			}
			break;
		case 'v':
			rc = 0;
			printf("version: %s\n", VERSION);
			goto out;
		default:
			rc = EINVAL;
			syslog(LOG_ERR, "unknown option: %c", c);
			goto out;
		}
	}

	rc = 1;
	/* is there a key? */
	if (argc <= optind) {
		usage();
		goto out;
	}

	/* get key and keyring values */
	errno = 0;
	key = strtol(argv[optind], NULL, 10);
	if (errno != 0) {
		key = 0;
		syslog(LOG_ERR, "Invalid key format: %s", strerror(errno));
		goto out;
	}

	if (init_plugin(&plugin_handle)) {
		plugin_handle = NULL;
		syslog(LOG_ERR, "Unable to initialize ID mapping plugin: %s",
			plugin_errmsg);
		goto out;
	}

	/* set timeout on key */
	rc = keyctl_set_timeout(key, timeout);
	if (rc == -1) {
		syslog(LOG_ERR, "unable to set key timeout: %s",
			strerror(errno));
		goto out_exit_plugin;
	}

	rc = keyctl_describe_alloc(key, &buf);
	if (rc == -1) {
		syslog(LOG_ERR, "keyctl_describe_alloc failed: %s",
		       strerror(errno));
		goto out_exit_plugin;
	}

	syslog(LOG_DEBUG, "key description: %s", buf);

	rc = cifs_idmap(key, buf);
	free(buf); /* keyctl_describe_alloc() allocated the description */
out_exit_plugin:
	exit_plugin(plugin_handle);
out:
	return rc;
}
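The key parsing above relies on errno alone, which catches range errors but not trailing garbage or an empty string. A stricter standalone sketch that also uses the endptr argument:

#include <errno.h>
#include <stdlib.h>

/* Sketch: parse a base-10 long, rejecting empty input and trailing junk. */
int parse_long(const char *s, long *out)
{
	char *end;

	errno = 0;
	*out = strtol(s, &end, 10);
	if (errno != 0 || end == s || *end != '\0')
		return -1;
	return 0;
}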
Example #10
int Tee_prepare_sock(struct sockaddr *addr, socklen_t len)
{
  int s, ret = 0;

  if (!config.tee_transparent) {
    struct host_addr source_ip;
#if defined ENABLE_IPV6
    struct sockaddr_storage ssource_ip;
#else
    struct sockaddr ssource_ip;
#endif

    if (config.nfprobe_source_ip) {
      ret = str_to_addr(config.nfprobe_source_ip, &source_ip);
      addr_to_sa((struct sockaddr *) &ssource_ip, &source_ip, 0);
    }

    if ((s = socket(addr->sa_family, SOCK_DGRAM, 0)) == -1) {
      Log(LOG_ERR, "ERROR ( %s/%s ): socket() error: %s\n", config.name, config.type, strerror(errno));
      exit_plugin(1);
    }

    if (config.nfprobe_ipprec) {
      int opt = config.nfprobe_ipprec << 5;
      int rc;

      rc = setsockopt(s, IPPROTO_IP, IP_TOS, &opt, sizeof(opt));
      if (rc < 0) Log(LOG_WARNING, "WARN ( %s/%s ): setsockopt() failed for IP_TOS: %s\n", config.name, config.type, strerror(errno));
    }

    if (ret && bind(s, (struct sockaddr *) &ssource_ip, sizeof(ssource_ip)) == -1)
      Log(LOG_ERR, "ERROR ( %s/%s ): bind() error: %s\n", config.name, config.type, strerror(errno));
  }
  else {
    int hincl = 1;                  /* 1 = on, 0 = off */

    if ((s = socket(addr->sa_family, SOCK_RAW, IPPROTO_RAW)) == -1) {
      Log(LOG_ERR, "ERROR ( %s/%s ): socket() error: %s\n", config.name, config.type, strerror(errno));
      exit_plugin(1);
    }

#if defined BSD
    setsockopt(s, IPPROTO_IP, IP_HDRINCL, &hincl, sizeof(hincl));
#endif
  }

  if (config.tee_pipe_size) {
    socklen_t l = sizeof(config.tee_pipe_size); /* getsockopt() expects socklen_t * */
    int saved = 0, obtained = 0;
    
    getsockopt(s, SOL_SOCKET, SO_SNDBUF, &saved, &l);
    Setsocksize(s, SOL_SOCKET, SO_SNDBUF, &config.tee_pipe_size, sizeof(config.tee_pipe_size));
    getsockopt(s, SOL_SOCKET, SO_SNDBUF, &obtained, &l);
  
    if (obtained < saved) {
      Setsocksize(s, SOL_SOCKET, SO_SNDBUF, &saved, l);
      getsockopt(s, SOL_SOCKET, SO_SNDBUF, &obtained, &l);
    }
    Log(LOG_INFO, "INFO ( %s/%s ): tee_pipe_size: obtained=%d target=%d.\n", config.name, config.type, obtained, config.tee_pipe_size);
  }

  if (connect(s, (struct sockaddr *)addr, len) == -1) {
    Log(LOG_ERR, "ERROR ( %s/%s ): connect() error: %s\n", config.name, config.type, strerror(errno));
    exit_plugin(1);
  }

  return(s);
}
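Setsocksize() appears to be a project-level wrapper; with the plain sockets API, the grow-the-send-buffer step reduces to a single setsockopt() call (target size illustrative):

  int target = 1048576; /* desired SO_SNDBUF in bytes */

  if (setsockopt(s, SOL_SOCKET, SO_SNDBUF, &target, sizeof(target)) < 0)
    Log(LOG_WARNING, "WARN ( %s/%s ): setsockopt(SO_SNDBUF) failed: %s\n", config.name, config.type, strerror(errno));

The save/re-read/restore dance above exists because the kernel may silently clamp the requested value: the code checks the size actually obtained and falls back to the saved one if the result came out smaller.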
Example #11
File: tee_plugin.c Project: NeilW/pmacct
void tee_plugin(int pipe_fd, struct configuration *cfgptr, void *ptr)
{
  struct pkt_msg *msg;
  unsigned char *pipebuf;
  struct pollfd pfd;
  int timeout, err;
  int ret, num, fd = -1; /* fd is only set once a receiver address is known */
  struct ring *rg = &((struct channels_list_entry *)ptr)->rg;
  struct ch_status *status = ((struct channels_list_entry *)ptr)->status;
  u_int32_t bufsz = ((struct channels_list_entry *)ptr)->bufsize;
  char *dataptr, dest_addr[256], dest_serv[256];

  struct sockaddr dest;
  socklen_t dest_len;

  unsigned char *rgptr;
  int pollagain = TRUE;
  u_int32_t seq = 1, rg_err_count = 0;

  /* XXX: glue */
  memcpy(&config, cfgptr, sizeof(struct configuration));
  recollect_pipe_memory(ptr);
  pm_setproctitle("%s [%s]", "Tee Plugin", config.name);
  if (config.pidfile) write_pid_file_plugin(config.pidfile, config.type, config.name);

  /* signal handling */
  signal(SIGINT, Tee_exit_now);
  signal(SIGUSR1, SIG_IGN);
  signal(SIGUSR2, SIG_IGN);
  signal(SIGPIPE, SIG_IGN);
#if !defined FBSD4
  signal(SIGCHLD, SIG_IGN);
#else
  signal(SIGCHLD, ignore_falling_child);
#endif

  if (config.tee_transparent && getuid() != 0) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Transparent mode requires super-user permissions. Exiting ...\n", config.name, config.type);
    exit_plugin(1);
  }

  if (!config.nfprobe_receiver) {
    Log(LOG_ERR, "ERROR ( %s/%s ): tee_receiver is not specified. Exiting ...\n", config.name, config.type);
    exit_plugin(1);
  }

  memset(&dest, 0, sizeof(dest));
  dest_len = sizeof(dest);
  Tee_parse_hostport(config.nfprobe_receiver, (struct sockaddr *)&dest, &dest_len);

  config.print_refresh_time = DEFAULT_TEE_REFRESH_TIME;
  timeout = config.print_refresh_time*1000;

  pipebuf = (unsigned char *) Malloc(config.buffer_size);

  pfd.fd = pipe_fd;
  pfd.events = POLLIN;
  setnonblocking(pipe_fd);

  memset(pipebuf, 0, config.buffer_size);

  /* Arrange send socket */
  if (dest.sa_family != 0) {
    /* getnameinfo() returns a nonzero EAI_* code on failure, not -1 */
    if ((err = getnameinfo((struct sockaddr *) &dest,
            dest_len, dest_addr, sizeof(dest_addr),
            dest_serv, sizeof(dest_serv), NI_NUMERICHOST)) != 0) {
      Log(LOG_ERR, "ERROR ( %s/%s ): getnameinfo: %d\n", config.name, config.type, err);
      exit_plugin(1);
    }
    fd = Tee_prepare_sock(&dest, dest_len);
  }

  /* plugin main loop */
  for (;;) {
    poll_again:
    status->wakeup = TRUE;
    ret = poll(&pfd, 1, timeout);
    if (ret < 0) goto poll_again;

    switch (ret) {
    case 0: /* timeout */
      /* reserved for future since we don't currently cache/batch/etc */
      break;
    default: /* we received data */
      read_data:
      if (!pollagain) {
        seq++;
        seq %= MAX_SEQNUM;
        if (seq == 0) rg_err_count = FALSE;
      }
      else {
        if ((ret = read(pipe_fd, &rgptr, sizeof(rgptr))) == 0)
          exit_plugin(1); /* we exit silently; something happened at the write end */
      }

      if (((struct ch_buf_hdr *)rg->ptr)->seq != seq) {
        if (!pollagain) {
          pollagain = TRUE;
          goto poll_again;
        }
        else {
          rg_err_count++;
          if (config.debug || (rg_err_count > MAX_RG_COUNT_ERR)) {
            Log(LOG_ERR, "ERROR ( %s/%s ): We are missing data.\n", config.name, config.type);
            Log(LOG_ERR, "If you see this message once in a while, discard it. Otherwise some solutions follow:\n");
            Log(LOG_ERR, "- increase shared memory size, 'plugin_pipe_size'; now: '%u'.\n", config.pipe_size);
            Log(LOG_ERR, "- increase buffer size, 'plugin_buffer_size'; now: '%u'.\n", config.buffer_size);
            Log(LOG_ERR, "- increase system maximum socket size.\n\n");
          }
          seq = ((struct ch_buf_hdr *)rg->ptr)->seq;
        }
      }

      pollagain = FALSE;
      memcpy(pipebuf, rg->ptr, bufsz);
      if ((rg->ptr+bufsz) >= rg->end) rg->ptr = rg->base;
      else rg->ptr += bufsz;

      msg = (struct pkt_msg *) (pipebuf+sizeof(struct ch_buf_hdr));

      while (((struct ch_buf_hdr *)pipebuf)->num) {
        Tee_send(msg, &dest, fd);

        ((struct ch_buf_hdr *)pipebuf)->num--;
        if (((struct ch_buf_hdr *)pipebuf)->num) {
          dataptr = (char *) msg; /* dataptr is declared char *, not unsigned char * */
          dataptr += PmsgSz;
          msg = (struct pkt_msg *) dataptr;
	}
      }
      goto read_data;
    }
  }  
}
Example #12
void kafka_cache_purge(struct chained_cache *queue[], int index, int safe_action)
{
  struct pkt_primitives *data = NULL;
  struct pkt_bgp_primitives *pbgp = NULL;
  struct pkt_nat_primitives *pnat = NULL;
  struct pkt_mpls_primitives *pmpls = NULL;
  struct pkt_tunnel_primitives *ptun = NULL;
  char *pcust = NULL;
  struct pkt_vlen_hdr_primitives *pvlen = NULL;
  struct pkt_bgp_primitives empty_pbgp;
  struct pkt_nat_primitives empty_pnat;
  struct pkt_mpls_primitives empty_pmpls;
  struct pkt_tunnel_primitives empty_ptun;
  char *empty_pcust = NULL;
  char src_mac[18], dst_mac[18], src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN], ip_address[INET6_ADDRSTRLEN];
  char rd_str[SRVBUFLEN], misc_str[SRVBUFLEN], dyn_kafka_topic[SRVBUFLEN], *orig_kafka_topic = NULL;
  int i, j, stop, batch_idx, is_topic_dyn = FALSE, qn = 0, ret, saved_index = index;
  int mv_num = 0, mv_num_save = 0;
  time_t start, duration;
  pid_t writer_pid = getpid();

  char *json_buf = NULL;
  int json_buf_off = 0;

#ifdef WITH_AVRO
  avro_writer_t avro_writer;
  char *avro_buf = NULL;
  int avro_buffer_full = FALSE;
#endif

  p_kafka_init_host(&kafkap_kafka_host, config.kafka_config_file);

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  if (!config.sql_table) config.sql_table = default_kafka_topic;
  else {
    if (strchr(config.sql_table, '$')) {
      is_topic_dyn = TRUE;
      orig_kafka_topic = config.sql_table;
    }
  }

  if (config.amqp_routing_key_rr) orig_kafka_topic = config.sql_table;

  p_kafka_init_topic_rr(&kafkap_kafka_host);
  p_kafka_set_topic_rr(&kafkap_kafka_host, config.amqp_routing_key_rr);

  empty_pcust = malloc(config.cpptrs.len);
  if (!empty_pcust) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() empty_pcust. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  memset(&empty_pbgp, 0, sizeof(struct pkt_bgp_primitives));
  memset(&empty_pnat, 0, sizeof(struct pkt_nat_primitives));
  memset(&empty_pmpls, 0, sizeof(struct pkt_mpls_primitives));
  memset(&empty_ptun, 0, sizeof(struct pkt_tunnel_primitives));
  memset(empty_pcust, 0, config.cpptrs.len);

  p_kafka_connect_to_produce(&kafkap_kafka_host);
  p_kafka_set_broker(&kafkap_kafka_host, config.sql_host, config.kafka_broker_port);
  if (!is_topic_dyn && !config.amqp_routing_key_rr) p_kafka_set_topic(&kafkap_kafka_host, config.sql_table);
  p_kafka_set_partition(&kafkap_kafka_host, config.kafka_partition);
  p_kafka_set_key(&kafkap_kafka_host, config.kafka_partition_key, config.kafka_partition_keylen);

  if (config.message_broker_output & PRINT_OUTPUT_JSON) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_STR);
  else if (config.message_broker_output & PRINT_OUTPUT_AVRO) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_BIN);
  else {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unsupported kafka_output value specified. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  for (j = 0, stop = 0; (!stop) && P_preprocess_funcs[j]; j++)
    stop = P_preprocess_funcs[j](queue, &index, j);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

  if (config.print_markers) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON || config.message_broker_output & PRINT_OUTPUT_AVRO) {
      void *json_obj = NULL;
      char *json_str = NULL; /* initialized so the checks below never read garbage */

      json_obj = compose_purge_init_json(config.name, writer_pid);

      if (json_obj) json_str = compose_json_str(json_obj);
      if (json_str) {
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

        free(json_str);
        json_str = NULL;
      }
    }
  }

  if (config.message_broker_output & PRINT_OUTPUT_JSON) {
    if (config.sql_multi_values) {
      json_buf = malloc(config.sql_multi_values);

      if (!json_buf) {
	Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (json_buf). Exiting ..\n", config.name, config.type);
	exit_plugin(1);
      }
      else memset(json_buf, 0, config.sql_multi_values);
    }
  }
  else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
    if (!config.avro_buffer_size) config.avro_buffer_size = LARGEBUFLEN;

    avro_buf = malloc(config.avro_buffer_size);

    if (!avro_buf) {
      Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (avro_buf). Exiting ..\n", config.name, config.type);
      exit_plugin(1);
    }
    else memset(avro_buf, 0, config.avro_buffer_size);

    avro_writer = avro_writer_memory(avro_buf, config.avro_buffer_size);
#endif
  }

  for (j = 0; j < index; j++) {
    void *json_obj = NULL;
    char *json_str = NULL; /* stays NULL if no output branch assigns it */

    if (queue[j]->valid != PRINT_CACHE_COMMITTED) continue;

    data = &queue[j]->primitives;
    if (queue[j]->pbgp) pbgp = queue[j]->pbgp;
    else pbgp = &empty_pbgp;

    if (queue[j]->pnat) pnat = queue[j]->pnat;
    else pnat = &empty_pnat;

    if (queue[j]->pmpls) pmpls = queue[j]->pmpls;
    else pmpls = &empty_pmpls;

    if (queue[j]->ptun) ptun = queue[j]->ptun;
    else ptun = &empty_ptun;

    if (queue[j]->pcust) pcust = queue[j]->pcust;
    else pcust = empty_pcust;

    if (queue[j]->pvlen) pvlen = queue[j]->pvlen;
    else pvlen = NULL;

    if (queue[j]->valid == PRINT_CACHE_FREE) continue;

    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
#ifdef WITH_JANSSON
      json_t *json_obj = json_object();
      int idx;

      for (idx = 0; idx < N_PRIMITIVES && cjhandler[idx]; idx++) cjhandler[idx](json_obj, queue[j]);
      add_writer_name_and_pid_json(json_obj, config.name, writer_pid);

      json_str = compose_json_str(json_obj);
#endif
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      avro_value_iface_t *avro_iface = avro_generic_class_from_schema(avro_acct_schema);
      avro_value_t avro_value = compose_avro(config.what_to_count, config.what_to_count_2, queue[j]->flow_type,
                           &queue[j]->primitives, pbgp, pnat, pmpls, ptun, pcust, pvlen, queue[j]->bytes_counter,
                           queue[j]->packet_counter, queue[j]->flow_counter, queue[j]->tcp_flags,
                           &queue[j]->basetime, queue[j]->stitch, avro_iface);
      size_t avro_value_size;

      add_writer_name_and_pid_avro(avro_value, config.name, writer_pid);
      avro_value_sizeof(&avro_value, &avro_value_size);

      if (avro_value_size > config.avro_buffer_size) {
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: insufficient buffer size (avro_buffer_size=%u)\n",
            config.name, config.type, config.avro_buffer_size);
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: increase value or look for avro_buffer_size in CONFIG-KEYS document.\n\n",
            config.name, config.type);
        exit_plugin(1);
      }
      else if (avro_value_size >= (config.avro_buffer_size - avro_writer_tell(avro_writer))) {
        avro_buffer_full = TRUE;
        j--;
      }
      else if (avro_value_write(avro_writer, &avro_value)) {
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: unable to write value: %s\n",
            config.name, config.type, avro_strerror());
        exit_plugin(1);
      }
      else {
        mv_num++;
      }

      avro_value_decref(&avro_value);
      avro_value_iface_decref(avro_iface);
#else
      if (config.debug) Log(LOG_DEBUG, "DEBUG ( %s/%s ): compose_avro(): AVRO object not created due to missing --enable-avro\n", config.name, config.type);
#endif
    }

    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
      char *tmp_str = NULL;

      if (json_str && config.sql_multi_values) {
        int json_strlen = (strlen(json_str) ? (strlen(json_str) + 1) : 0);

	if (json_strlen >= (config.sql_multi_values - json_buf_off)) {
	  if (json_strlen >= config.sql_multi_values) {
	    Log(LOG_ERR, "ERROR ( %s/%s ): kafka_multi_values not large enough to store JSON elements. Exiting ..\n", config.name, config.type); 
	    exit(1);
	  }

	  tmp_str = json_str;
	  json_str = json_buf;
	}
	else {
	  strcat(json_buf, json_str);
	  mv_num++;

	  string_add_newline(json_buf);
	  json_buf_off = strlen(json_buf);

	  free(json_str);
	  json_str = NULL;
	}
      }

      if (json_str) {
        if (is_topic_dyn) {
          P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        if (config.amqp_routing_key_rr) {
          P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

	if (config.sql_multi_values) {
	  json_str = tmp_str;
	  strcpy(json_buf, json_str);

	  mv_num_save = mv_num;
	  mv_num = 1;

          string_add_newline(json_buf);
          json_buf_off = strlen(json_buf);
        }

        free(json_str);
        json_str = NULL;

        if (!ret) {
          if (!config.sql_multi_values) qn++;
          else qn += mv_num_save;
        }
        else break;
      }
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      if (!config.sql_multi_values || (mv_num >= config.sql_multi_values) || avro_buffer_full) {
        if (is_topic_dyn) {
          P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        if (config.amqp_routing_key_rr) {
          P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer));
        avro_writer_reset(avro_writer);
        avro_buffer_full = FALSE;
        mv_num_save = mv_num;
        mv_num = 0;

        if (!ret) qn += mv_num_save;
        else break;
      }
#endif
    }
  }

  if (config.sql_multi_values) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
      if (json_buf && json_buf_off) {
	/* no handling of dyn routing keys here: not compatible */
	Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_buf);
	ret = p_kafka_produce_data(&kafkap_kafka_host, json_buf, strlen(json_buf));

	if (!ret) qn += mv_num;
      }
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      if (avro_writer_tell(avro_writer)) {
        ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer));
        avro_writer_free(avro_writer);

        if (!ret) qn += mv_num;
      }
#endif
    }
  }

  duration = time(NULL)-start;

  if (config.print_markers) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON || config.message_broker_output & PRINT_OUTPUT_AVRO) {
      void *json_obj = NULL;
      char *json_str = NULL; /* initialized so the checks below never read garbage */

      json_obj = compose_purge_close_json(config.name, writer_pid, qn, saved_index, duration);

      if (json_obj) json_str = compose_json_str(json_obj);
      if (json_str) {
	sleep(1); /* Let's give a small delay to facilitate purge_close being
		     the last message in batch in case of partitioned topics */
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

        free(json_str);
        json_str = NULL;
      }
    }
  }

  p_kafka_close(&kafkap_kafka_host, FALSE);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n",
		config.name, config.type, writer_pid, qn, saved_index, duration);

  if (config.sql_trigger_exec && !safe_action) P_trigger_exec(config.sql_trigger_exec); 

  if (empty_pcust) free(empty_pcust);

  if (json_buf) free(json_buf);

#ifdef WITH_AVRO
  if (avro_buf) free(avro_buf);
#endif
}
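In the Avro branch above, each value is sized with avro_value_sizeof() before writing so that a full buffer can be flushed and the element retried (the j-- step). The underlying memory-writer pattern, as a standalone sketch (buffer names and the flush step are illustrative):

  avro_writer_t writer = avro_writer_memory(buf, buf_size);

  if (value_size >= buf_size - avro_writer_tell(writer)) {
    /* flush buf[0 .. avro_writer_tell(writer)) to the producer, then rewind */
    avro_writer_reset(writer);
  }
  avro_value_write(writer, &value);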
Example #13
void PG_cache_purge(struct db_cache *queue[], int index, struct insert_data *idata)
{
  PGresult *ret;
  struct logfile lf;
  struct db_cache **reprocess_queries_queue, **bulk_reprocess_queries_queue;
  char orig_insert_clause[LONGSRVBUFLEN], orig_update_clause[LONGSRVBUFLEN], orig_lock_clause[LONGSRVBUFLEN];
  char orig_copy_clause[LONGSRVBUFLEN], tmpbuf[LONGLONGSRVBUFLEN], tmptable[SRVBUFLEN];
  time_t start;
  int j, r, reprocess = 0, stop, go_to_pending, reprocess_idx, bulk_reprocess_idx, saved_index = index;
  struct primitives_ptrs prim_ptrs;
  struct pkt_data dummy_data;
  pid_t writer_pid = getpid();

  if (!index) {
    Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
    Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: 0/0, ET: 0) ***\n", config.name, config.type, writer_pid);
    return;
  }

  bed.lf = &lf;
  memset(&lf, 0, sizeof(struct logfile));
  memset(&prim_ptrs, 0, sizeof(prim_ptrs));
  memset(&dummy_data, 0, sizeof(dummy_data));

  reprocess_queries_queue = (struct db_cache **) malloc(qq_size*sizeof(struct db_cache *));
  bulk_reprocess_queries_queue = (struct db_cache **) malloc(qq_size*sizeof(struct db_cache *));
  if (!reprocess_queries_queue || !bulk_reprocess_queries_queue) {
    Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (reprocess_queries_queue). Exiting ..\n", config.name, config.type);
    exit_plugin(1);
  }

  for (j = 0, stop = 0; (!stop) && sql_preprocess_funcs[j]; j++) 
    stop = sql_preprocess_funcs[j](queue, &index, j);
  if (config.what_to_count & COUNT_CLASS)
    sql_invalidate_shadow_entries(queue, &index);
  idata->ten = index;

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

  /* re-using pending queries queue stuff from parent and saving clauses */
  memcpy(pending_queries_queue, queue, index*sizeof(struct db_cache *));
  pqq_ptr = index;

  strlcpy(orig_copy_clause, copy_clause, LONGSRVBUFLEN);
  strlcpy(orig_insert_clause, insert_clause, LONGSRVBUFLEN);
  strlcpy(orig_update_clause, update_clause, LONGSRVBUFLEN);
  strlcpy(orig_lock_clause, lock_clause, LONGSRVBUFLEN);

  start:
  memcpy(queue, pending_queries_queue, pqq_ptr*sizeof(struct db_cache *));
  memset(pending_queries_queue, 0, pqq_ptr*sizeof(struct db_cache *));
  index = pqq_ptr; pqq_ptr = 0;

  /* We check for variable substitution in SQL table */
  if (idata->dyn_table) {
    time_t stamp = 0;

    memset(tmpbuf, 0, LONGLONGSRVBUFLEN);
    stamp = queue[0]->basetime;

    prim_ptrs.data = &dummy_data;
    primptrs_set_all_from_db_cache(&prim_ptrs, queue[0]);

    strlcpy(idata->dyn_table_name, config.sql_table, SRVBUFLEN);
    strlcpy(insert_clause, orig_insert_clause, LONGSRVBUFLEN);
    strlcpy(update_clause, orig_update_clause, LONGSRVBUFLEN);
    strlcpy(lock_clause, orig_lock_clause, LONGSRVBUFLEN);

    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, copy_clause, &prim_ptrs);
    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, insert_clause, &prim_ptrs);
    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, update_clause, &prim_ptrs);
    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, lock_clause, &prim_ptrs);
    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, idata->dyn_table_name, &prim_ptrs);

    strftime_same(copy_clause, LONGSRVBUFLEN, tmpbuf, &stamp);
    strftime_same(insert_clause, LONGSRVBUFLEN, tmpbuf, &stamp);
    strftime_same(update_clause, LONGSRVBUFLEN, tmpbuf, &stamp);
    strftime_same(lock_clause, LONGSRVBUFLEN, tmpbuf, &stamp);
    strftime_same(idata->dyn_table_name, LONGSRVBUFLEN, tmpbuf, &stamp);

    if (config.sql_table_schema) sql_create_table(bed.p, &stamp, &prim_ptrs); 
  }

  /* beginning DB transaction */
  (*sqlfunc_cbr.lock)(bed.p);

  /* for each element of the queue to be processed we execute sql_query(); the function
     returns a non-zero value if DB has failed; then we use reprocess_queries_queue and
     bulk_reprocess_queries_queue to handle reprocessing of specific elements or bulk
     queue (of elements not being held in a pending_queries_queue) due to final COMMIT
     failure */

  memset(reprocess_queries_queue, 0, qq_size*sizeof(struct db_cache *));
  memset(bulk_reprocess_queries_queue, 0, qq_size*sizeof(struct db_cache *));
  reprocess_idx = 0; bulk_reprocess_idx = 0;

  for (j = 0; j < index; j++) {
    go_to_pending = FALSE;

    if (idata->dyn_table) {
      time_t stamp = 0;

      memset(tmpbuf, 0, LONGLONGSRVBUFLEN); // XXX: pedantic?
      stamp = queue[idata->current_queue_elem]->basetime;
      strlcpy(tmptable, config.sql_table, SRVBUFLEN);

      prim_ptrs.data = &dummy_data;
      primptrs_set_all_from_db_cache(&prim_ptrs, queue[idata->current_queue_elem]);
      handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, tmptable, &prim_ptrs);
      strftime_same(tmptable, LONGSRVBUFLEN, tmpbuf, &stamp);

      if (strncmp(idata->dyn_table_name, tmptable, SRVBUFLEN)) {
        pending_queries_queue[pqq_ptr] = queue[idata->current_queue_elem];

        pqq_ptr++;
        go_to_pending = TRUE;
      }
    }

    if (!go_to_pending) { 
      if (queue[j]->valid) {
	r = sql_query(&bed, queue[j], idata);

	/* note down all elements in case of a reprocess due to COMMIT failure */
	bulk_reprocess_queries_queue[bulk_reprocess_idx] = queue[j];
	bulk_reprocess_idx++;
      }
      else r = FALSE; /* invalid elements are marked as not to be reprocessed */
      if (r) {
        reprocess_queries_queue[reprocess_idx] = queue[j];
        reprocess_idx++;

	if (!reprocess) sql_db_fail(&p);
        reprocess = REPROCESS_SPECIFIC;
      }
    }
  }

  /* Finalizing DB transaction */
  if (!p.fail) {
    if (config.sql_use_copy) {
      if (PQputCopyEnd(p.desc, NULL) < 0) Log(LOG_ERR, "ERROR ( %s/%s ): COPY failed!\n\n", config.name, config.type); 
    }

    ret = PQexec(p.desc, "COMMIT");
    if (PQresultStatus(ret) != PGRES_COMMAND_OK) {
      if (!reprocess) sql_db_fail(&p);
      reprocess = REPROCESS_BULK;
    }
    PQclear(ret);
  }

  /* don't reprocess free (SQL_CACHE_FREE) and already recovered (SQL_CACHE_ERROR) elements */
  if (p.fail) {
    if (reprocess == REPROCESS_SPECIFIC) {
      for (j = 0; j < reprocess_idx; j++) {
        if (reprocess_queries_queue[j]->valid == SQL_CACHE_COMMITTED) sql_query(&bed, reprocess_queries_queue[j], idata);
      }
    }
    else if (reprocess == REPROCESS_BULK) {
      for (j = 0; j < bulk_reprocess_idx; j++) {
        if (bulk_reprocess_queries_queue[j]->valid == SQL_CACHE_COMMITTED) sql_query(&bed, bulk_reprocess_queries_queue[j], idata);
      }
    }
  }

  if (b.connected) {
    if (config.sql_use_copy) {
      if (PQputCopyEnd(b.desc, NULL) < 0) Log(LOG_ERR, "ERROR ( %s/%s ): COPY failed!\n\n", config.name, config.type);
    }
    ret = PQexec(b.desc, "COMMIT");
    if (PQresultStatus(ret) != PGRES_COMMAND_OK) sql_db_fail(&b);
    PQclear(ret);
  }

  /* rewinding stuff */
  if (lf.file) PG_file_close(&lf);
  if (lf.fail || b.fail) Log(LOG_ALERT, "ALERT ( %s/%s ): recovery for PgSQL operation failed.\n", config.name, config.type);

  /* If we have pending queries then start again */
  if (pqq_ptr) goto start;

  idata->elap_time = time(NULL)-start;
  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n",
		config.name, config.type, writer_pid, idata->qn, saved_index, idata->elap_time);

  if (config.sql_trigger_exec) {
    if (!config.debug) idata->elap_time = time(NULL)-start;
    SQL_SetENV_child(idata);
  }
}
Example #14
/* Functions */
void imt_plugin(int pipe_fd, struct configuration *cfgptr, void *ptr) 
{
  int maxqsize = (MAX_QUERIES*sizeof(struct pkt_primitives))+sizeof(struct query_header)+2;
  struct sockaddr cAddr;
  struct pkt_data *data;
  struct ports_table pt;
  unsigned char srvbuf[maxqsize];
  unsigned char *srvbufptr;
  struct query_header *qh;
  unsigned char *pipebuf;
  char path[] = "/tmp/collect.pipe";
  short int go_to_clear = FALSE;
  u_int32_t request, sz;
  struct ch_status *status = ((struct channels_list_entry *)ptr)->status;
  int datasize = ((struct channels_list_entry *)ptr)->datasize;
  struct extra_primitives extras;
  unsigned char *rgptr;
  int pollagain = 0;
  u_int32_t seq = 0;
  int rg_err_count = 0;
  int ret;
  struct pkt_bgp_primitives *pbgp, empty_pbgp;
  struct pkt_nat_primitives *pnat, empty_pnat;
  struct pkt_mpls_primitives *pmpls, empty_pmpls;
  char *pcust, empty_pcust[] = "";
  struct pkt_vlen_hdr_primitives *pvlen, empty_pvlen;
  struct networks_file_data nfd;
  struct timeval select_timeout;
  struct primitives_ptrs prim_ptrs;

  fd_set read_descs, bkp_read_descs; /* select() stuff */
  int select_fd, lock = FALSE;
  socklen_t cLen; /* accept() takes a socklen_t *, not an int * */
  int num, sd, sd2;
  char *dataptr;

  memcpy(&config, cfgptr, sizeof(struct configuration));
  memcpy(&extras, &((struct channels_list_entry *)ptr)->extras, sizeof(struct extra_primitives));
  recollect_pipe_memory(ptr);
  pm_setproctitle("%s [%s]", "IMT Plugin", config.name);

  if (config.pidfile) write_pid_file_plugin(config.pidfile, config.type, config.name);
  if (config.logfile) {
    fclose(config.logfile_fd);
    config.logfile_fd = open_logfile(config.logfile, "a");
  }

  if (extras.off_pkt_vlen_hdr_primitives) {
    Log(LOG_ERR, "ERROR ( %s/%s ): variable-length primitives, ie. label, are not supported in IMT plugin. Exiting ..\n", config.name, config.type);
    exit_plugin(1);
  }

  reload_map = FALSE;
  status->wakeup = TRUE;

  /* a bunch of default definitions and post-checks */
  pipebuf = (unsigned char *) malloc(config.buffer_size);
  if (!pipebuf) {
    Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (pipebuf). Exiting ..\n", config.name, config.type);
    exit_plugin(1);
  }

  setnonblocking(pipe_fd);
  memset(pipebuf, 0, config.buffer_size);
  no_more_space = FALSE;

  if (config.what_to_count & (COUNT_SUM_HOST|COUNT_SUM_NET))
    insert_func = sum_host_insert;
  else if (config.what_to_count & COUNT_SUM_PORT) insert_func = sum_port_insert;
  else if (config.what_to_count & COUNT_SUM_AS) insert_func = sum_as_insert;
#if defined (HAVE_L2)
  else if (config.what_to_count & COUNT_SUM_MAC) insert_func = sum_mac_insert;
#endif
  else insert_func = insert_accounting_structure;

  memset(&nt, 0, sizeof(nt));
  memset(&nc, 0, sizeof(nc));
  memset(&pt, 0, sizeof(pt));

  load_networks(config.networks_file, &nt, &nc);
  set_net_funcs(&nt);

  if (config.ports_file) load_ports(config.ports_file, &pt);
  if (config.pkt_len_distrib_bins_str) load_pkt_len_distrib_bins();
  else {
    if (config.what_to_count_2 & COUNT_PKT_LEN_DISTRIB) {
      Log(LOG_ERR, "ERROR ( %s/%s ): 'aggregate' contains pkt_len_distrib but no 'pkt_len_distrib_bins' defined. Exiting.\n", config.name, config.type);
      exit_plugin(1); 
    }
  }

  if ((!config.num_memory_pools) && (!have_num_memory_pools))
    config.num_memory_pools = NUM_MEMORY_POOLS;
  
  if (!config.memory_pool_size) config.memory_pool_size = MEMORY_POOL_SIZE;  
  else {
    if (config.memory_pool_size < sizeof(struct acc)) {
      Log(LOG_WARNING, "WARN ( %s/%s ): enforcing memory pool's minimum size, %d bytes.\n", config.name, config.type, sizeof(struct acc));
      config.memory_pool_size = MEMORY_POOL_SIZE;
    }
  }

  if (!config.imt_plugin_path) config.imt_plugin_path = path; 
  if (!config.buckets) config.buckets = MAX_HOSTS;

  init_memory_pool_table(config);
  if (mpd == NULL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): unable to allocate memory pools table\n", config.name, config.type);
    exit_plugin(1);
  }

  current_pool = request_memory_pool(config.buckets*sizeof(struct acc));
  if (current_pool == NULL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): unable to allocate first memory pool, try with larger value.\n", config.name, config.type);
    exit_plugin(1);
  }
  a = current_pool->base_ptr;

  lru_elem_ptr = malloc(config.buckets*sizeof(struct acc *));
  if (lru_elem_ptr == NULL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): unable to allocate LRU element pointers.\n", config.name, config.type);
    exit_plugin(1);
  }
  else memset(lru_elem_ptr, 0, config.buckets*sizeof(struct acc *));

  current_pool = request_memory_pool(config.memory_pool_size);
  if (current_pool == NULL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): unable to allocate more memory pools, try with larger value.\n", config.name, config.type);
    exit_plugin(1);
  }

  signal(SIGHUP, reload); /* handles reopening of syslog channel */
  signal(SIGINT, exit_now); /* exit lane */
  signal(SIGUSR1, SIG_IGN);
  signal(SIGUSR2, reload_maps);
  signal(SIGPIPE, SIG_IGN);
  signal(SIGCHLD, SIG_IGN); 

  memset(&empty_pbgp, 0, sizeof(empty_pbgp));
  memset(&empty_pnat, 0, sizeof(empty_pnat));
  memset(&empty_pmpls, 0, sizeof(empty_pmpls));
  memset(&empty_pvlen, 0, sizeof(empty_pvlen));

  /* building a server for interrogations by clients */
  sd = build_query_server(config.imt_plugin_path);
  cLen = sizeof(cAddr);

  /* preparing for synchronous I/O multiplexing */
  select_fd = 0;
  FD_ZERO(&read_descs);
  FD_SET(sd, &read_descs);
  if (sd > select_fd) select_fd = sd;
  FD_SET(pipe_fd, &read_descs);
  if (pipe_fd > select_fd) select_fd = pipe_fd;
  select_fd++;
  memcpy(&bkp_read_descs, &read_descs, sizeof(read_descs));

  qh = (struct query_header *) srvbuf;

  /* plugin main loop */
  for(;;) {
    select_again:
    select_timeout.tv_sec = DEFAULT_IMT_PLUGIN_SELECT_TIMEOUT;
    select_timeout.tv_usec = 0;

    memcpy(&read_descs, &bkp_read_descs, sizeof(bkp_read_descs));
    num = select(select_fd, &read_descs, NULL, NULL, &select_timeout);
    if (num <= 0) {
      if (getppid() == 1) {
	Log(LOG_ERR, "ERROR ( %s/%s ): Core process *seems* gone. Exiting.\n", config.name, config.type);
	exit_plugin(1);
      } 

      goto select_again;  
    }

    gettimeofday(&cycle_stamp, NULL);

    /* doing server tasks */
    if (FD_ISSET(sd, &read_descs)) {
      struct pollfd pfd;
      int ret;

      sd2 = accept(sd, &cAddr, &cLen);
      setblocking(sd2);
      srvbufptr = srvbuf;
      sz = maxqsize;

      pfd.fd = sd2;
      pfd.events = POLLIN;

      recv_again:
      ret = poll(&pfd, 1, 1000);
      if (ret == 0) {
        Log(LOG_WARNING, "WARN ( %s/%s ): Timed out while processing fragmented query.\n", config.name, config.type); 
        close(sd2);
	goto select_again;
      }
      else {
        num = recv(sd2, srvbufptr, sz, 0);
        if (num <= 0) { /* connection closed or recv() error: don't index buf[-1] */
          close(sd2);
          goto select_again;
        }
        if (srvbufptr[num-1] != '\x4') {
	  srvbufptr += num;
	  sz -= num;
	  goto recv_again; /* fragmented query */
        }
      }

      num = num+(maxqsize-sz);

      if (qh->num > MAX_QUERIES) {
	Log(LOG_DEBUG, "DEBUG ( %s/%s ): request discarded. Too much queries.\n", config.name, config.type);
	close(sd2);
	continue;
      }

      request = qh->type;
      if (request & WANT_RESET) request ^= WANT_RESET;
      if (request & WANT_LOCK_OP) {
	lock = TRUE;
	request ^= WANT_LOCK_OP;
      }

      /*
	 - if explicitly required, we do not fork: the query obtains exclusive
	   control - a lock - over the memory table;
	 - operations that may cause inconsistencies (full erasure, counter
	   reset for individual entries, etc.) are entitled to an exclusive
	   lock;
	 - if the query is just a matter of a single short-lived walk through
	   the table, we avoid fork(): the plugin serves the request itself;
	 - in all other cases, we fork; the newly created child serves the
	   query asynchronously.
      */

      if (request & WANT_ERASE) {
	request ^= WANT_ERASE;
	if (request) {
	  if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, FALSE);
	  else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. Errno: %d\n", config.name, config.type, num, errno);
	}
	Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
	go_to_clear = TRUE;  
      }
      else if (((request == WANT_COUNTER) || (request == WANT_MATCH)) &&
	(qh->num == 1) && (qh->what_to_count == config.what_to_count)) {
	if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, FALSE);
        else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. ERRNO: %d\n", config.name, config.type, num, errno);
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
      } 
      else if (request == WANT_CLASS_TABLE) {
	if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, FALSE);
        else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. ERRNO: %d\n", config.name, config.type, num, errno);
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
      }
      else if (request == WANT_PKT_LEN_DISTRIB_TABLE) {
        if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, FALSE);
        else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. ERRNO: %d\n", config.name, config.type, num, errno);
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
      }
      else {
	if (lock) {
	  if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, FALSE);
          else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. Errno: %d\n", config.name, config.type, num, errno);
          Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
	}
	else { 
          switch (fork()) {
	  case -1: /* Something went wrong */
	    Log(LOG_WARNING, "WARN ( %s/%s ): Unable to serve client query: %s\n", config.name, config.type, strerror(errno));
	    break;
          case 0: /* Child */
            close(sd);
	    pm_setproctitle("%s [%s]", "IMT Plugin -- serving client", config.name);
            if (num > 0) process_query_data(sd2, srvbuf, num, &extras, datasize, TRUE);
	    else Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d incoming bytes. Errno: %d\n", config.name, config.type, num, errno);
            Log(LOG_DEBUG, "DEBUG ( %s/%s ): Closing connection with client ...\n", config.name, config.type);
            close(sd2);
            exit(0);
          default: /* Parent */
            break;
          } 
	}
      }
      close(sd2);
    }

    /* clearing stats if requested */
    if (go_to_clear) {
      /* When using extended BGP features we need to
	 free() up memory allocations before erasing */ 
      /* XXX: given the current use of empty_* vars we have always to
         free_extra_allocs() in order to prevent memory leaks */
      /*
      if (extras.off_pkt_bgp_primitives || extras.off_pkt_nat_primitives ||
	  extras.off_pkt_mpls_primitives || extras.off_custom_primitives)
      */
	free_extra_allocs(); 
      clear_memory_pool_table();
      current_pool = request_memory_pool(config.buckets*sizeof(struct acc));
      if (current_pool == NULL) {
        Log(LOG_ERR, "ERROR ( %s/%s ): Cannot allocate my first memory pool, try with larger value.\n", config.name, config.type);
        exit_plugin(1);
      }
      a = current_pool->base_ptr;

      current_pool = request_memory_pool(config.memory_pool_size);
      if (current_pool == NULL) {
        Log(LOG_ERR, "ERROR ( %s/%s ): Cannot allocate more memory pools, try with larger value.\n", config.name, config.type);
        exit_plugin(1);
      }
      go_to_clear = FALSE;
      no_more_space = FALSE;
      memcpy(&table_reset_stamp, &cycle_stamp, sizeof(struct timeval));
    }

    if (FD_ISSET(pipe_fd, &read_descs)) {
      if (!pollagain) {
        seq++;
        seq %= MAX_SEQNUM;
      }

      pollagain = FALSE;
      if ((num = read(pipe_fd, &rgptr, sizeof(rgptr))) == 0)
        exit_plugin(1); /* we exit silently; something happened at the write end */

      if (num < 0) {
        pollagain = TRUE;
        goto select_again;
      }

      memcpy(pipebuf, rgptr, config.buffer_size);
      if (((struct ch_buf_hdr *)pipebuf)->seq != seq) {
        rg_err_count++;
        if (config.debug || (rg_err_count > MAX_RG_COUNT_ERR)) {
          Log(LOG_ERR, "ERROR ( %s/%s ): We are missing data.\n", config.name, config.type);
          Log(LOG_ERR, "If you see this message once in a while, discard it. Otherwise some solutions follow:\n");
          Log(LOG_ERR, "- increase shared memory size, 'plugin_pipe_size'; now: '%d'.\n", config.pipe_size);
          Log(LOG_ERR, "- increase buffer size, 'plugin_buffer_size'; now: '%d'.\n", config.buffer_size);
          Log(LOG_ERR, "- increase system maximum socket size.\n\n");
          seq = ((struct ch_buf_hdr *)pipebuf)->seq;
	}
      }

      if (num > 0) {
	data = (struct pkt_data *) (pipebuf+sizeof(struct ch_buf_hdr));

	while (((struct ch_buf_hdr *)pipebuf)->num > 0) {

	  // XXX: to be optimized: remove empty_* vars
          if (extras.off_pkt_bgp_primitives)
	    pbgp = (struct pkt_bgp_primitives *) ((u_char *)data + extras.off_pkt_bgp_primitives);
	  else pbgp = &empty_pbgp;
          if (extras.off_pkt_nat_primitives) 
            pnat = (struct pkt_nat_primitives *) ((u_char *)data + extras.off_pkt_nat_primitives);
          else pnat = &empty_pnat;
          if (extras.off_pkt_mpls_primitives) 
            pmpls = (struct pkt_mpls_primitives *) ((u_char *)data + extras.off_pkt_mpls_primitives);
          else pmpls = &empty_pmpls;
          if (extras.off_custom_primitives)
	    pcust = ((char *)data + extras.off_custom_primitives);
          else pcust = empty_pcust;
	  if (extras.off_pkt_vlen_hdr_primitives)
	    pvlen = (struct pkt_vlen_hdr_primitives *) ((u_char *)data + extras.off_pkt_vlen_hdr_primitives); 
	  else pvlen = &empty_pvlen;

	  for (num = 0; net_funcs[num]; num++)
	    (*net_funcs[num])(&nt, &nc, &data->primitives, pbgp, &nfd);

	  if (config.ports_file) {
	    if (!pt.table[data->primitives.src_port]) data->primitives.src_port = 0;
	    if (!pt.table[data->primitives.dst_port]) data->primitives.dst_port = 0;
	  }

	  if (config.pkt_len_distrib_bins_str &&
	      config.what_to_count_2 & COUNT_PKT_LEN_DISTRIB)
	    evaluate_pkt_len_distrib(data);

	  prim_ptrs.data = data; 
	  prim_ptrs.pbgp = pbgp; 
	  prim_ptrs.pnat = pnat;
	  prim_ptrs.pmpls = pmpls;
	  prim_ptrs.pcust = pcust;
	  prim_ptrs.pvlen = pvlen;
	  
          (*insert_func)(&prim_ptrs);

	  ((struct ch_buf_hdr *)pipebuf)->num--;
	  if (((struct ch_buf_hdr *)pipebuf)->num) {
            dataptr = (unsigned char *) data;
            dataptr += datasize;
            data = (struct pkt_data *) dataptr;
	  }
        }
      }
    } 

    if (reload_map) {
      load_networks(config.networks_file, &nt, &nc);
      load_ports(config.ports_file, &pt);
      reload_map = FALSE;
    }
  }
}
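The query protocol visible above frames each request with a trailing ASCII EOT byte ('\x4'): the server keeps recv()ing until it sees one. A hypothetical client-side sketch against the same unix socket (path handling and helper name are illustrative):

#include <sys/socket.h>
#include <sys/un.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: connect to the IMT socket and send an EOT-terminated query. */
int send_imt_query(const char *path, const void *query, size_t len)
{
  struct sockaddr_un sun;
  char eot = '\x4';
  int fd = socket(AF_UNIX, SOCK_STREAM, 0);

  if (fd < 0) return -1;
  memset(&sun, 0, sizeof(sun));
  sun.sun_family = AF_UNIX;
  strncpy(sun.sun_path, path, sizeof(sun.sun_path) - 1);

  if (connect(fd, (struct sockaddr *) &sun, sizeof(sun)) < 0) {
    close(fd);
    return -1;
  }
  write(fd, query, len);
  write(fd, &eot, 1); /* the terminator the server's recv loop waits for */

  return fd; /* caller reads the reply from fd, then close()s it */
}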
Example #15
void kafka_cache_purge(struct chained_cache *queue[], int index)
{
  struct pkt_primitives *data = NULL;
  struct pkt_bgp_primitives *pbgp = NULL;
  struct pkt_nat_primitives *pnat = NULL;
  struct pkt_mpls_primitives *pmpls = NULL;
  char *pcust = NULL;
  struct pkt_vlen_hdr_primitives *pvlen = NULL;
  struct pkt_bgp_primitives empty_pbgp;
  struct pkt_nat_primitives empty_pnat;
  struct pkt_mpls_primitives empty_pmpls;
  char *empty_pcust = NULL;
  char src_mac[18], dst_mac[18], src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN], ip_address[INET6_ADDRSTRLEN];
  char rd_str[SRVBUFLEN], misc_str[SRVBUFLEN], dyn_kafka_topic[SRVBUFLEN], *orig_kafka_topic = NULL;
  int i, j, stop, batch_idx, is_topic_dyn = FALSE, qn = 0, ret, saved_index = index;
  int mv_num = 0, mv_num_save = 0;
  time_t start, duration;
  pid_t writer_pid = getpid();

#ifdef WITH_JANSSON
  json_t *array = json_array();
#endif

  p_kafka_init_host(&kafkap_kafka_host);

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  if (!config.sql_table) config.sql_table = default_kafka_topic;
  else {
    if (strchr(config.sql_table, '$')) {
      is_topic_dyn = TRUE;
      orig_kafka_topic = config.sql_table;
      config.sql_table = dyn_kafka_topic;
    }
  }
  if (config.amqp_routing_key_rr) {
    orig_kafka_topic = config.sql_table;
    config.sql_table = dyn_kafka_topic;
  }

  p_kafka_init_topic_rr(&kafkap_kafka_host);
  p_kafka_set_topic_rr(&kafkap_kafka_host, config.amqp_routing_key_rr);

  empty_pcust = malloc(config.cpptrs.len);
  if (!empty_pcust) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() empty_pcust. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  memset(&empty_pbgp, 0, sizeof(struct pkt_bgp_primitives));
  memset(&empty_pnat, 0, sizeof(struct pkt_nat_primitives));
  memset(&empty_pmpls, 0, sizeof(struct pkt_mpls_primitives));
  memset(empty_pcust, 0, config.cpptrs.len);

  p_kafka_connect_to_produce(&kafkap_kafka_host);
  p_kafka_set_broker(&kafkap_kafka_host, config.sql_host, config.kafka_broker_port);
  p_kafka_set_topic(&kafkap_kafka_host, config.sql_table);
  p_kafka_set_partition(&kafkap_kafka_host, config.kafka_partition);
  p_kafka_set_key(&kafkap_kafka_host, config.kafka_partition_key, config.kafka_partition_keylen);
  p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_STR);

  for (j = 0, stop = 0; (!stop) && P_preprocess_funcs[j]; j++)
    stop = P_preprocess_funcs[j](queue, &index, j);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

  if (config.print_markers) {
    void *json_obj = NULL;
    char *json_str = NULL; /* initialized so the checks below never read garbage */

    json_obj = compose_purge_init_json(writer_pid);
    if (json_obj) json_str = compose_json_str(json_obj);
    if (json_str) {
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;
    }
  }

  for (j = 0; j < index; j++) {
    void *json_obj = NULL;
    char *json_str = NULL;

    if (queue[j]->valid != PRINT_CACHE_COMMITTED) continue;

    data = &queue[j]->primitives;
    if (queue[j]->pbgp) pbgp = queue[j]->pbgp;
    else pbgp = &empty_pbgp;

    if (queue[j]->pnat) pnat = queue[j]->pnat;
    else pnat = &empty_pnat;

    if (queue[j]->pmpls) pmpls = queue[j]->pmpls;
    else pmpls = &empty_pmpls;

    if (queue[j]->pcust) pcust = queue[j]->pcust;
    else pcust = empty_pcust;

    if (queue[j]->pvlen) pvlen = queue[j]->pvlen;
    else pvlen = NULL;

    if (queue[j]->valid == PRINT_CACHE_FREE) continue;

    json_obj = compose_json(config.what_to_count, config.what_to_count_2, queue[j]->flow_type,
                         &queue[j]->primitives, pbgp, pnat, pmpls, pcust, pvlen, queue[j]->bytes_counter,
			 queue[j]->packet_counter, queue[j]->flow_counter, queue[j]->tcp_flags,
			 &queue[j]->basetime, queue[j]->stitch);

    json_str = compose_json_str(json_obj);

#ifdef WITH_JANSSON
    if (json_str && config.sql_multi_values) {
      json_t *elem = NULL;
      char *tmp_str = json_str;
      int do_free = FALSE;

      if (json_array_size(array) >= config.sql_multi_values) {
	json_str = json_dumps(array, 0);
	json_array_clear(array);
        mv_num_save = mv_num;
        mv_num = 0;
      }
      else do_free = TRUE;

      elem = json_loads(tmp_str, 0, NULL);
      json_array_append_new(array, elem);
      mv_num++;

      /* tmp_str always points to the current record's string: free it once
         it has been loaded into the array; when the array was just flushed,
         json_str holds the dump and must survive until produced */
      free(tmp_str);
      if (do_free) json_str = NULL;
    }
#endif

    if (json_str) {
      if (is_topic_dyn) {
	P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
	p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
      }

      if (config.amqp_routing_key_rr) {
        P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
	p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
      }

      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;

      if (!ret) {
	if (!config.sql_multi_values) qn++;
	else qn += mv_num_save;
      }
      else break;
    }
  }

#ifdef WITH_JANSSON
  if (config.sql_multi_values && json_array_size(array)) {
    char *json_str;

    json_str = json_dumps(array, 0);
    json_array_clear(array);
    json_decref(array);

    if (json_str) {
      /* no handling of dyn routing keys here: not compatible */
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;

      if (!ret) qn += mv_num;
    }
  }
#endif

  duration = time(NULL)-start;

  if (config.print_markers) {
    void *json_obj;
    char *json_str = NULL;

    json_obj = compose_purge_close_json(writer_pid, qn, saved_index, duration);
    if (json_obj) json_str = compose_json_str(json_obj);
    if (json_str) {
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;
    }
  }

  p_kafka_close(&kafkap_kafka_host, FALSE);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n",
		config.name, config.type, writer_pid, qn, saved_index, duration);

  if (config.sql_trigger_exec) P_trigger_exec(config.sql_trigger_exec); 

  if (empty_pcust) free(empty_pcust);
}
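For reference, a minimal, self-contained sketch of the jansson batching idiom used by the purge loop above; the batch threshold and record strings are illustrative stand-ins for config.sql_multi_values and the composed JSON records:

#include <jansson.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  json_t *array = json_array();
  const char *records[] = { "{\"a\": 1}", "{\"a\": 2}", "{\"a\": 3}" };
  size_t i, batch = 2; /* stand-in for config.sql_multi_values */

  for (i = 0; i < 3; i++) {
    /* flush once the batch threshold is reached, as the purge loop does */
    if (json_array_size(array) >= batch) {
      char *out = json_dumps(array, 0);
      printf("flush: %s\n", out); /* the plugin would produce this to Kafka */
      free(out);
      json_array_clear(array);
    }
    json_array_append_new(array, json_loads(records[i], 0, NULL));
  }

  if (json_array_size(array)) { /* final partial batch */
    char *out = json_dumps(array, 0);
    printf("final flush: %s\n", out);
    free(out);
  }
  json_decref(array);
  return 0;
}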
Example #16
int MY_compose_static_queries()
{
  int primitives=0, set_primitives=0, set_event_primitives=0, have_flows=0;

  if (config.what_to_count & COUNT_FLOWS || (config.sql_table_version >= 4 &&
					     config.sql_table_version < SQL_TABLE_VERSION_BGP &&
					     !config.sql_optimize_clauses)) {
    config.what_to_count |= COUNT_FLOWS;
    have_flows = TRUE;

    if ((config.sql_table_version < 4 || config.sql_table_version >= SQL_TABLE_VERSION_BGP) && !config.sql_optimize_clauses) {
      Log(LOG_ERR, "ERROR ( %s/%s ): The accounting of flows requires SQL table v4. Exiting.\n", config.name, config.type);
      exit_plugin(1);
    }
  }

  /* "INSERT INTO ... VALUES ... " and "... WHERE ..." stuff */
  strncpy(where[primitives].string, " WHERE ", sizeof(where[primitives].string));
  snprintf(insert_clause, sizeof(insert_clause), "INSERT INTO `%s` (", config.sql_table);
  strncpy(values[primitives].string, " VALUES (", sizeof(values[primitives].string));
  primitives = MY_evaluate_history(primitives);
  primitives = sql_evaluate_primitives(primitives);

  strncpy(insert_counters_clause, ", packets, bytes", SPACELEFT(insert_counters_clause));
  if (have_flows) strncat(insert_counters_clause, ", flows", SPACELEFT(insert_counters_clause));
  strncat(insert_counters_clause, ")", SPACELEFT(insert_counters_clause));
  strncpy(insert_nocounters_clause, ")", SPACELEFT(insert_nocounters_clause));

  /* "LOCK ..." stuff */
  snprintf(lock_clause, sizeof(lock_clause), "LOCK TABLES `%s` WRITE", config.sql_table);
  strncpy(unlock_clause, "UNLOCK TABLES", sizeof(unlock_clause));

  /* "UPDATE ... SET ..." stuff */
  snprintf(update_clause, sizeof(update_clause), "UPDATE `%s` ", config.sql_table);

  set_primitives = sql_compose_static_set(have_flows);
  set_event_primitives = sql_compose_static_set_event();

  if (config.sql_history) {
    if (!config.sql_history_since_epoch) {
      strncpy(set[set_primitives].string, ", ", SPACELEFT(set[set_primitives].string));
      strncat(set[set_primitives].string, "stamp_updated=NOW()", SPACELEFT(set[set_primitives].string));
      set[set_primitives].type = TIMESTAMP;
      set[set_primitives].handler = count_noop_setclause_handler;
      set_primitives++;

      if (set_event_primitives) strncpy(set_event[set_event_primitives].string, ", ", SPACELEFT(set_event[set_event_primitives].string));
      else strncpy(set_event[set_event_primitives].string, "SET ", SPACELEFT(set_event[set_event_primitives].string));
      strncat(set_event[set_event_primitives].string, "stamp_updated=NOW()", SPACELEFT(set_event[set_event_primitives].string));
      set_event[set_event_primitives].type = TIMESTAMP;
      set_event[set_event_primitives].handler = count_noop_setclause_event_handler;
      set_event_primitives++;
    }
    else {
      strncpy(set[set_primitives].string, ", ", SPACELEFT(set[set_primitives].string));
      strncat(set[set_primitives].string, "stamp_updated=UNIX_TIMESTAMP(NOW())", SPACELEFT(set[set_primitives].string));
      set[set_primitives].type = TIMESTAMP;
      set[set_primitives].handler = count_noop_setclause_handler;
      set_primitives++;

      if (set_event_primitives) strncpy(set_event[set_event_primitives].string, ", ", SPACELEFT(set_event[set_event_primitives].string));
      else strncpy(set_event[set_event_primitives].string, "SET ", SPACELEFT(set_event[set_event_primitives].string));
      strncat(set_event[set_event_primitives].string, "stamp_updated=UNIX_TIMESTAMP(NOW())", SPACELEFT(set_event[set_event_primitives].string));
      set_event[set_event_primitives].type = TIMESTAMP;
      set_event[set_event_primitives].handler = count_noop_setclause_event_handler;
      set_event_primitives++;
    }
  }

  return primitives;
}
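For orientation, a hedged illustration of the clause skeletons this function assembles; the real column lists come from MY_evaluate_history() and sql_evaluate_primitives(), so the fields and values below are placeholders only:

/* Placeholders only: actual columns and values are generated at runtime */
const char *insert_skeleton =
  "INSERT INTO `acct` (ip_src, ip_dst, stamp_inserted, packets, bytes, flows)";
const char *values_skeleton = " VALUES ('10.0.0.1', '10.0.0.2', NOW(), 10, 640, 1)";
const char *update_skeleton =
  "UPDATE `acct` SET packets=packets+10, bytes=bytes+640, stamp_updated=NOW()";
const char *lock_skeleton   = "LOCK TABLES `acct` WRITE";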
Example #17
int MY_cache_dbop(struct DBdesc *db, struct db_cache *cache_elem, struct insert_data *idata)
{
  char *ptr_values, *ptr_where, *ptr_mv, *ptr_set, *ptr_insert;
  int num=0, num_set=0, ret=0, have_flows=0, len=0;

  if (idata->mv.last_queue_elem) {
    ret = mysql_query(db->desc, multi_values_buffer);
    Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d VALUES statements sent to the MySQL server.\n",
                    config.name, config.type, idata->mv.buffer_elem_num);
    if (ret) goto signal_error;
    idata->iqn++;
    idata->mv.buffer_elem_num = FALSE;
    idata->mv.buffer_offset = 0;

    return FALSE;
  }

  if (config.what_to_count & COUNT_FLOWS) have_flows = TRUE;

  /* constructing sql query */
  ptr_where = where_clause;
  ptr_values = values_clause; 
  ptr_set = set_clause;
  ptr_insert = insert_full_clause;
  memset(where_clause, 0, sizeof(where_clause));
  memset(values_clause, 0, sizeof(values_clause));
  memset(set_clause, 0, sizeof(set_clause));
  memset(insert_full_clause, 0, sizeof(insert_full_clause));

  for (num = 0; num < idata->num_primitives; num++)
    (*where[num].handler)(cache_elem, idata, num, &ptr_values, &ptr_where);

  if (cache_elem->flow_type == NF9_FTYPE_EVENT || cache_elem->flow_type == NF9_FTYPE_OPTION) {
    for (num_set = 0; set_event[num_set].type; num_set++)
      (*set_event[num_set].handler)(cache_elem, idata, num_set, &ptr_set, NULL);
  }
  else {
    for (num_set = 0; set[num_set].type; num_set++)
      (*set[num_set].handler)(cache_elem, idata, num_set, &ptr_set, NULL);
  }
  
  /* sending UPDATE query a) if not switched off and
     b) if we actually have something to update */
  if (!config.sql_dont_try_update && num_set) {
    strncpy(sql_data, update_clause, SPACELEFT(sql_data));
    strncat(sql_data, set_clause, SPACELEFT(sql_data));
    strncat(sql_data, where_clause, SPACELEFT(sql_data));

    ret = mysql_query(db->desc, sql_data);
    if (ret) goto signal_error; 
  }

  if (config.sql_dont_try_update || !num_set || (mysql_affected_rows(db->desc) == 0)) {
    /* UPDATE failed, trying with an INSERT query */ 
    if (cache_elem->flow_type == NF9_FTYPE_EVENT || cache_elem->flow_type == NF9_FTYPE_OPTION) {
      strncpy(insert_full_clause, insert_clause, SPACELEFT(insert_full_clause));
      strncat(insert_full_clause, insert_nocounters_clause, SPACELEFT(insert_full_clause));
      strncat(ptr_values, ")", SPACELEFT(values_clause));
    }
    else {
      strncpy(insert_full_clause, insert_clause, SPACELEFT(insert_full_clause));
      strncat(insert_full_clause, insert_counters_clause, SPACELEFT(insert_full_clause));
#if defined HAVE_64BIT_COUNTERS
      if (have_flows) snprintf(ptr_values, SPACELEFT(values_clause), ", %llu, %llu, %llu)", cache_elem->packet_counter, cache_elem->bytes_counter, cache_elem->flows_counter);
      else snprintf(ptr_values, SPACELEFT(values_clause), ", %llu, %llu)", cache_elem->packet_counter, cache_elem->bytes_counter);
#else
      if (have_flows) snprintf(ptr_values, SPACELEFT(values_clause), ", %lu, %lu, %lu)", cache_elem->packet_counter, cache_elem->bytes_counter, cache_elem->flows_counter);
      else snprintf(ptr_values, SPACELEFT(values_clause), ", %lu, %lu)", cache_elem->packet_counter, cache_elem->bytes_counter);
#endif
    }

    if (config.sql_multi_values) { 
      multi_values_handling:
      len = config.sql_multi_values-idata->mv.buffer_offset; 
      if (!idata->mv.buffer_elem_num) {
	if (strlen(insert_full_clause) < len) {
	  strncpy(multi_values_buffer, insert_full_clause, config.sql_multi_values);
	  strcat(multi_values_buffer, " VALUES");
	  idata->mv.buffer_offset += strlen(multi_values_buffer);
	  idata->mv.head_buffer_elem = idata->current_queue_elem;
	}
        else {
          Log(LOG_ERR, "ERROR ( %s/%s ): 'sql_multi_values' is too small (%d). Try with a larger value.\n",
                         config.name, config.type, config.sql_multi_values);
          exit_plugin(1);
	}
      }
      len = config.sql_multi_values-idata->mv.buffer_offset; 
      if (strlen(values_clause) < len) { 
	if (idata->mv.buffer_elem_num) {
	  strcpy(multi_values_buffer+idata->mv.buffer_offset, ",");
	  idata->mv.buffer_offset++;
	}
	ptr_mv = multi_values_buffer+idata->mv.buffer_offset;
        strcpy(multi_values_buffer+idata->mv.buffer_offset, values_clause+7); /* cut the initial 'VALUES' */
	idata->mv.buffer_offset += strlen(ptr_mv);
	idata->mv.buffer_elem_num++;
      }
      else {
	if (idata->mv.buffer_elem_num) {
	  ret = mysql_query(db->desc, multi_values_buffer);
	  Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d VALUES statements sent to the MySQL server.\n",
			  config.name, config.type, idata->mv.buffer_elem_num);
	  if (ret) goto signal_error;
	  idata->iqn++;
	  idata->mv.buffer_elem_num = FALSE;
	  idata->mv.head_buffer_elem = FALSE;
	  idata->mv.buffer_offset = 0;
	  goto multi_values_handling;
	}
	else {
	  Log(LOG_ERR, "ERROR ( %s/%s ): 'sql_multi_values' is too small (%d). Try with a larger value.\n",
			 config.name, config.type, config.sql_multi_values);
	  exit_plugin(1);
	}
      } 
    }
    else {
      strncpy(sql_data, insert_full_clause, sizeof(sql_data));
      strncat(sql_data, values_clause, SPACELEFT(sql_data));

      ret = mysql_query(db->desc, sql_data);
      if (ret) goto signal_error; 
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, sql_data);
      idata->iqn++;
    }
  }
  else {
    Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, sql_data);
    idata->uqn++;
  }

  idata->een++;
  // cache_elem->valid = FALSE; /* committed */

  return ret;

  signal_error:
  if (!idata->mv.buffer_elem_num) Log(LOG_DEBUG, "DEBUG ( %s/%s ): FAILED query follows:\n%s\n", config.name, config.type, sql_data);
  else {
    if (!idata->recover || db->type != BE_TYPE_PRIMARY) {
      /* DB failure: we will rewind the multi-values buffer */
      idata->current_queue_elem = idata->mv.head_buffer_elem;
      idata->mv.buffer_elem_num = 0;
    }
  }
  MY_get_errmsg(db);
  if (db->errmsg) Log(LOG_ERR, "ERROR ( %s/%s ): %s\n\n", config.name, config.type, db->errmsg);

  if (mysql_errno(db->desc) == 1062) return FALSE; /* not signalling duplicate entry problems */
  else return ret;
}
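A compact, self-contained sketch of the multi-VALUES batching idiom above; mv_append() and its byte budget are hypothetical stand-ins for the multi_values_buffer handling and sql_multi_values, with the same flush-and-retry contract:

#include <string.h>

/* Returns 0 on append, 1 when the buffer is full (caller flushes via
   mysql_query(), resets *off to 0 and retries), -1 if the budget cannot
   even hold one tuple. */
int mv_append(char *buf, size_t budget, size_t *off, const char *tuple)
{
  if (*off == 0) { /* start a fresh statement */
    const char *head = "INSERT INTO `acct` (col1, col2) VALUES "; /* illustrative */
    if (strlen(head) + strlen(tuple) >= budget) return -1;
    strcpy(buf, head);
    *off = strlen(head);
  }
  else {
    if (*off + 1 + strlen(tuple) >= budget) return 1;
    buf[(*off)++] = ','; /* comma-join tuples under one INSERT */
  }
  strcpy(buf + *off, tuple);
  *off += strlen(tuple);
  return 0;
}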
Example #18
/* Functions */
void amqp_plugin(int pipe_fd, struct configuration *cfgptr, void *ptr) 
{
  struct pkt_data *data;
  struct ports_table pt;
  unsigned char *pipebuf;
  struct pollfd pfd;
  time_t t, now;
  int timeout, ret, num; 
  struct ring *rg = &((struct channels_list_entry *)ptr)->rg;
  struct ch_status *status = ((struct channels_list_entry *)ptr)->status;
  int datasize = ((struct channels_list_entry *)ptr)->datasize;
  u_int32_t bufsz = ((struct channels_list_entry *)ptr)->bufsize;
  struct networks_file_data nfd;

  unsigned char *rgptr;
  int pollagain = TRUE;
  u_int32_t seq = 1, rg_err_count = 0;

  struct extra_primitives extras;
  struct primitives_ptrs prim_ptrs;
  char *dataptr;

  memcpy(&config, cfgptr, sizeof(struct configuration));
  memcpy(&extras, &((struct channels_list_entry *)ptr)->extras, sizeof(struct extra_primitives));
  recollect_pipe_memory(ptr);
  pm_setproctitle("%s [%s]", "RabbitMQ/AMQP Plugin", config.name);

  P_set_signals();
  P_init_default_values();
  pipebuf = (unsigned char *) Malloc(config.buffer_size);
  memset(pipebuf, 0, config.buffer_size);

  timeout = config.sql_refresh_time*1000;

  if (!config.sql_user) config.sql_user = rabbitmq_user;
  if (!config.sql_passwd) config.sql_passwd = rabbitmq_pwd;

  /* setting function pointers */
  if (config.what_to_count & (COUNT_SUM_HOST|COUNT_SUM_NET))
    insert_func = P_sum_host_insert;
  else if (config.what_to_count & COUNT_SUM_PORT) insert_func = P_sum_port_insert;
  else if (config.what_to_count & COUNT_SUM_AS) insert_func = P_sum_as_insert;
#if defined (HAVE_L2)
  else if (config.what_to_count & COUNT_SUM_MAC) insert_func = P_sum_mac_insert;
#endif
  else insert_func = P_cache_insert;
  purge_func = amqp_cache_purge;

  memset(&nt, 0, sizeof(nt));
  memset(&nc, 0, sizeof(nc));
  memset(&pt, 0, sizeof(pt));

  load_networks(config.networks_file, &nt, &nc);
  set_net_funcs(&nt);

  if (config.ports_file) load_ports(config.ports_file, &pt);
  if (config.pkt_len_distrib_bins_str) load_pkt_len_distrib_bins();
  else {
    if (config.what_to_count_2 & COUNT_PKT_LEN_DISTRIB) {
      Log(LOG_ERR, "ERROR ( %s/%s ): 'aggregate' contains pkt_len_distrib but no 'pkt_len_distrib_bins' defined. Exiting.\n", config.name, config.type);
      exit_plugin(1);
    }
  }
  
  memset(&prim_ptrs, 0, sizeof(prim_ptrs));
  set_primptrs_funcs(&extras);

  pfd.fd = pipe_fd;
  pfd.events = POLLIN;
  setnonblocking(pipe_fd);

  now = time(NULL);

  /* print_refresh time init: deadline */
  refresh_deadline = now; 
  t = roundoff_time(refresh_deadline, config.sql_history_roundoff);
  while ((t+config.sql_refresh_time) < refresh_deadline) t += config.sql_refresh_time;
  refresh_deadline = t;
  refresh_deadline += config.sql_refresh_time; /* it's a deadline not a basetime */

  if (config.sql_history) {
    basetime_init = P_init_historical_acct;
    basetime_eval = P_eval_historical_acct;
    basetime_cmp = P_cmp_historical_acct;

    (*basetime_init)(now);
  }

  /* setting number of entries in _protocols structure */
  while (_protocols[protocols_number].number != -1) protocols_number++;

  /* plugin main loop */
  for(;;) {
    poll_again:
    status->wakeup = TRUE;
    ret = poll(&pfd, 1, timeout);

    if (ret <= 0) {
      if (getppid() == 1) {
        Log(LOG_ERR, "ERROR ( %s/%s ): Core process *seems* gone. Exiting.\n", config.name, config.type);
        exit_plugin(1);
      }

      if (ret < 0) goto poll_again;
    }

    now = time(NULL);

    if (config.sql_history) {
      while (now > (basetime.tv_sec + timeslot)) {
	new_basetime.tv_sec = basetime.tv_sec;
        basetime.tv_sec += timeslot;
        if (config.sql_history == COUNT_MONTHLY)
          timeslot = calc_monthly_timeslot(basetime.tv_sec, config.sql_history_howmany, ADD);
      }
    }

    switch (ret) {
    case 0: /* timeout */
      P_cache_handle_flush_event(&pt);
      break;
    default: /* we received data */
      read_data:
      if (!pollagain) {
        seq++;
        seq %= MAX_SEQNUM;
        if (seq == 0) rg_err_count = FALSE;
      }
      else {
        if ((ret = read(pipe_fd, &rgptr, sizeof(rgptr))) == 0) 
	  exit_plugin(1); /* we exit silently; something happened at the write end */
      }

      if (((struct ch_buf_hdr *)rg->ptr)->seq != seq) {
        if (!pollagain) {
          pollagain = TRUE;
          goto poll_again;
        }
        else {
          rg_err_count++;
          if (config.debug || (rg_err_count > MAX_RG_COUNT_ERR)) {
            Log(LOG_ERR, "ERROR ( %s/%s ): We are missing data.\n", config.name, config.type);
            Log(LOG_ERR, "If you see this message once in a while, discard it. Otherwise some solutions follow:\n");
            Log(LOG_ERR, "- increase shared memory size, 'plugin_pipe_size'; now: '%u'.\n", config.pipe_size);
            Log(LOG_ERR, "- increase buffer size, 'plugin_buffer_size'; now: '%u'.\n", config.buffer_size);
            Log(LOG_ERR, "- increase system maximum socket size.\n\n");
          }
          seq = ((struct ch_buf_hdr *)rg->ptr)->seq;
        }
      }

      pollagain = FALSE;
      memcpy(pipebuf, rg->ptr, bufsz);
      if ((rg->ptr+bufsz) >= rg->end) rg->ptr = rg->base;
      else rg->ptr += bufsz;

      /* lazy refresh time handling */ 
      if (now > refresh_deadline) P_cache_handle_flush_event(&pt);

      data = (struct pkt_data *) (pipebuf+sizeof(struct ch_buf_hdr));

      while (((struct ch_buf_hdr *)pipebuf)->num > 0) {
        for (num = 0; primptrs_funcs[num]; num++)
          (*primptrs_funcs[num])((u_char *)data, &extras, &prim_ptrs);

	for (num = 0; net_funcs[num]; num++)
	  (*net_funcs[num])(&nt, &nc, &data->primitives, prim_ptrs.pbgp, &nfd);

	if (config.ports_file) {
          if (!pt.table[data->primitives.src_port]) data->primitives.src_port = 0;
          if (!pt.table[data->primitives.dst_port]) data->primitives.dst_port = 0;
        }

        if (config.pkt_len_distrib_bins_str &&
            config.what_to_count_2 & COUNT_PKT_LEN_DISTRIB)
          evaluate_pkt_len_distrib(data);

        prim_ptrs.data = data;
        (*insert_func)(&prim_ptrs);

	((struct ch_buf_hdr *)pipebuf)->num--;
        if (((struct ch_buf_hdr *)pipebuf)->num) {
          dataptr = (unsigned char *) data;
	  dataptr += datasize;
          data = (struct pkt_data *) dataptr;
	}
      }
      goto read_data;
    }
  }
}
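A stripped-down sketch of the sequence-check logic in the read path above; the header struct and counter names are simplified stand-ins for struct ch_buf_hdr and the seq/rg_err_count bookkeeping:

struct hdr { unsigned int seq; unsigned int num; };

/* Returns 1 when the buffer is in sequence and safe to consume; on a
   mismatch, counts the error and resyncs to the producer's sequence. */
int check_seq(struct hdr *h, unsigned int *expected, unsigned int *errs)
{
  if (h->seq != *expected) {
    (*errs)++;          /* data was lost, or we polled too early */
    *expected = h->seq; /* resync to the producer */
    return 0;
  }
  return 1;
}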
Example #19
void amqp_cache_purge(struct chained_cache *queue[], int index)
{
  struct pkt_primitives *data = NULL;
  struct pkt_bgp_primitives *pbgp = NULL;
  struct pkt_nat_primitives *pnat = NULL;
  struct pkt_mpls_primitives *pmpls = NULL;
  char *pcust = NULL;
  struct pkt_bgp_primitives empty_pbgp;
  struct pkt_nat_primitives empty_pnat;
  struct pkt_mpls_primitives empty_pmpls;
  char *empty_pcust = NULL;
  struct amqp_basic_properties_t_ amqp_msg_props;
  char src_mac[18], dst_mac[18], src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN], ip_address[INET6_ADDRSTRLEN];
  char rd_str[SRVBUFLEN], misc_str[SRVBUFLEN], dyn_amqp_routing_key[SRVBUFLEN], *orig_amqp_routing_key = NULL;
  char default_amqp_exchange[] = "pmacct", default_amqp_exchange_type[] = "direct";
  char default_amqp_routing_key[] = "acct";
  int i, j, amqp_status, batch_idx, is_routing_key_dyn = FALSE;
  time_t start, duration;
  pid_t writer_pid = getpid();

  amqp_connection_state_t amqp_conn;
  amqp_socket_t *amqp_socket = NULL;
  amqp_rpc_reply_t amqp_ret;

  /* setting some defaults */
  if (!config.sql_db) config.sql_db = default_amqp_exchange;
  if (!config.sql_table) config.sql_table = default_amqp_routing_key;
  else {
    if (strchr(config.sql_table, '$')) {
      is_routing_key_dyn = TRUE;
      orig_amqp_routing_key = config.sql_table;
      config.sql_table = dyn_amqp_routing_key;
    }
  }
  if (!config.amqp_exchange_type) config.amqp_exchange_type = default_amqp_exchange_type;

  empty_pcust = malloc(config.cpptrs.len);
  if (!empty_pcust) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() empty_pcust. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  memset(&empty_pbgp, 0, sizeof(struct pkt_bgp_primitives));
  memset(&empty_pnat, 0, sizeof(struct pkt_nat_primitives));
  memset(&empty_pmpls, 0, sizeof(struct pkt_mpls_primitives));
  memset(empty_pcust, 0, config.cpptrs.len);
  memset(&amqp_msg_props, 0, sizeof(amqp_msg_props));

  amqp_conn = amqp_new_connection();

  amqp_socket = amqp_tcp_socket_new(amqp_conn);
  if (!amqp_socket) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Connection failed to RabbitMQ: no socket\n", config.name, config.type);
    return;
  }

  if (config.sql_host)
    amqp_status = amqp_socket_open(amqp_socket, config.sql_host, 5672/* default port */);
  else 
    amqp_status = amqp_socket_open(amqp_socket, "127.0.0.1", 5672 /* default port */);

  if (amqp_status) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Connection failed to RabbitMQ: unable to open socket\n", config.name, config.type);
    return;
  }

  amqp_ret = amqp_login(amqp_conn, "/", 0, 131072, 0, AMQP_SASL_METHOD_PLAIN, config.sql_user, config.sql_passwd);
  if (amqp_ret.reply_type != AMQP_RESPONSE_NORMAL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Connection failed to RabbitMQ: login\n", config.name, config.type);
    return;
  }

  amqp_channel_open(amqp_conn, 1);

  amqp_ret = amqp_get_rpc_reply(amqp_conn);
  if (amqp_ret.reply_type != AMQP_RESPONSE_NORMAL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Connection failed to RabbitMQ: unable to open channel\n", config.name, config.type);
    return;
  }

  amqp_exchange_declare(amqp_conn, 1, amqp_cstring_bytes(config.sql_db), amqp_cstring_bytes(config.amqp_exchange_type), 0, 0, amqp_empty_table);
  amqp_ret = amqp_get_rpc_reply(amqp_conn);
  if (amqp_ret.reply_type != AMQP_RESPONSE_NORMAL) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Connection failed to RabbitMQ: exchange declare\n", config.name, config.type);
    return;
  }

  if (config.amqp_persistent_msg) {
    amqp_msg_props._flags = (AMQP_BASIC_CONTENT_TYPE_FLAG | AMQP_BASIC_DELIVERY_MODE_FLAG);
    amqp_msg_props.content_type = amqp_cstring_bytes("text/json");
    amqp_msg_props.delivery_mode = 2; /* persistent delivery */
  }

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

  for (j = 0; j < index; j++) {
    char *json_str;

    data = &queue[j]->primitives;
    if (queue[j]->pbgp) pbgp = queue[j]->pbgp;
    else pbgp = &empty_pbgp;

    if (queue[j]->pnat) pnat = queue[j]->pnat;
    else pnat = &empty_pnat;

    if (queue[j]->pmpls) pmpls = queue[j]->pmpls;
    else pmpls = &empty_pmpls;

    if (queue[j]->pcust) pcust = queue[j]->pcust;
    else pcust = empty_pcust;

    if (queue[j]->valid == PRINT_CACHE_FREE) continue;

    json_str = compose_json(config.what_to_count, config.what_to_count_2, queue[j]->flow_type,
                         &queue[j]->primitives, pbgp, pnat, pmpls, pcust, queue[j]->bytes_counter,
			 queue[j]->packet_counter, queue[j]->flow_counter, queue[j]->tcp_flags,
			 &queue[j]->basetime);

    if (json_str) {
      if (is_routing_key_dyn) amqp_handle_routing_key_dyn_strings(config.sql_table, SRVBUFLEN, orig_amqp_routing_key,
								  queue[j]);  

      if (config.debug) Log(LOG_DEBUG, "DEBUG ( %s/%s ): publishing [E=%s RK=%s DM=%u]: %s\n", config.name,
                            config.type, config.sql_db, config.sql_table, amqp_msg_props.delivery_mode, json_str);

      amqp_basic_publish(amqp_conn, 1, amqp_cstring_bytes(config.sql_db), amqp_cstring_bytes(config.sql_table),
			 0, 0, &amqp_msg_props, amqp_cstring_bytes(json_str));

      amqp_ret = amqp_get_rpc_reply(amqp_conn);
      if (amqp_ret.reply_type != AMQP_RESPONSE_NORMAL) {
	Log(LOG_ERR, "ERROR ( %s/%s ): Connection failed to RabbitMQ: publishing\n", config.name, config.type);
	return;
      } 

      free(json_str);
    }
  }

  amqp_channel_close(amqp_conn, 1, AMQP_REPLY_SUCCESS);
  amqp_connection_close(amqp_conn, AMQP_REPLY_SUCCESS);
  amqp_destroy_connection(amqp_conn);

  duration = time(NULL)-start;
  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u, ET: %u) ***\n",
		config.name, config.type, writer_pid, index, duration);

  if (config.sql_trigger_exec) P_trigger_exec(config.sql_trigger_exec); 
}
Example #20
File: imt_plugin.c Project: hrey/pmacct
void exit_now(int signum)
{
  exit_plugin(0);
}
Example #21
void exit_now(int signum)
{
  if (config.imt_plugin_path) unlink(config.imt_plugin_path);
  exit_plugin(0);
}
Example #22
int SQLI_cache_dbop(struct DBdesc *db, struct db_cache *cache_elem, struct insert_data *idata)
{
  char *ptr_values, *ptr_where, *ptr_mv, *ptr_set;
  int num=0, ret=0, have_flows=0, len=0;

  if (idata->mv.last_queue_elem) {
    ret = sqlite3_exec(db->desc, multi_values_buffer, NULL, NULL, NULL);
    Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d INSERT statements sent to the SQLite database.\n",
                config.name, config.type, idata->mv.buffer_elem_num);
    if (ret) goto signal_error;
    idata->iqn++;
    idata->mv.buffer_elem_num = FALSE;
    idata->mv.buffer_offset = 0;

    return FALSE;
  }
  
  if (config.what_to_count & COUNT_FLOWS) have_flows = TRUE;

  /* constructing sql query */
  ptr_where = where_clause;
  ptr_values = values_clause; 
  ptr_set = set_clause;
  memset(where_clause, 0, sizeof(where_clause));
  memset(values_clause, 0, sizeof(values_clause));
  memset(set_clause, 0, sizeof(set_clause));

  for (num = 0; num < idata->num_primitives; num++)
    (*where[num].handler)(cache_elem, idata, num, &ptr_values, &ptr_where);

  for (num = 0; set[num].type; num++)
    (*set[num].handler)(cache_elem, idata, num, &ptr_set, NULL);
  
  /* sending UPDATE query */
  if (!config.sql_dont_try_update) {
    strncpy(sql_data, update_clause, SPACELEFT(sql_data));
    strncat(sql_data, set_clause, SPACELEFT(sql_data));
    strncat(sql_data, where_clause, SPACELEFT(sql_data));

    ret = sqlite3_exec(db->desc, sql_data, NULL, NULL, NULL);
    if (ret) goto signal_error; 
  }

  if (config.sql_dont_try_update || (sqlite3_changes(db->desc) == 0)) {
    /* UPDATE failed, trying with an INSERT query */ 
#if defined HAVE_64BIT_COUNTERS
    if (have_flows) snprintf(ptr_values, SPACELEFT(values_clause), ", %llu, %llu, %llu)", cache_elem->packet_counter, cache_elem->bytes_counter, cache_elem->flows_counter);
    else snprintf(ptr_values, SPACELEFT(values_clause), ", %llu, %llu)", cache_elem->packet_counter, cache_elem->bytes_counter);
#else
    if (have_flows) snprintf(ptr_values, SPACELEFT(values_clause), ", %lu, %lu, %lu)", cache_elem->packet_counter, cache_elem->bytes_counter, cache_elem->flows_counter);
    else snprintf(ptr_values, SPACELEFT(values_clause), ", %lu, %lu)", cache_elem->packet_counter, cache_elem->bytes_counter);
#endif
    
    strncpy(sql_data, insert_clause, sizeof(sql_data));
    strncat(sql_data, values_clause, SPACELEFT(sql_data));

    if (config.sql_multi_values) {
      multi_values_handling:
      len = config.sql_multi_values-idata->mv.buffer_offset;
      if (strlen(sql_data) < len) { /* the full INSERT statement is appended below */
	if (idata->mv.buffer_elem_num) {
	  strcpy(multi_values_buffer+idata->mv.buffer_offset, "; ");
	  idata->mv.buffer_offset += 2;
	}
	}
	ptr_mv = multi_values_buffer+idata->mv.buffer_offset;
	strcpy(multi_values_buffer+idata->mv.buffer_offset, sql_data); 
	idata->mv.buffer_offset += strlen(ptr_mv);
        idata->mv.buffer_elem_num++;
      }
      else {
	if (idata->mv.buffer_elem_num) {
	  ret = sqlite3_exec(db->desc, multi_values_buffer, NULL, NULL, NULL);
	  Log(LOG_DEBUG, "DEBUG ( %s/%s ): %d INSERT statements sent to the SQLite database.\n",
		config.name, config.type, idata->mv.buffer_elem_num);
	  if (ret) goto signal_error;
	  idata->iqn++;
	  idata->mv.buffer_elem_num = FALSE;
	  idata->mv.head_buffer_elem = FALSE;
	  idata->mv.buffer_offset = 0;
	  goto multi_values_handling;
	}
	else {
	  Log(LOG_ERR, "ERROR ( %s/%s ): 'sql_multi_values' is too small (%d). Try with a larger value.\n",
	  config.name, config.type, config.sql_multi_values);
	  exit_plugin(1);
	}
      }
    }
    else {
      ret = sqlite3_exec(db->desc, sql_data, NULL, NULL, NULL);
      Log(LOG_DEBUG, "( %s/%s ): %s\n\n", config.name, config.type, sql_data);
      if (ret) goto signal_error; 
      idata->iqn++;
    }
  }
  else {
    Log(LOG_DEBUG, "( %s/%s ): %s\n\n", config.name, config.type, sql_data);
    idata->uqn++;
  }

  idata->een++;
  // cache_elem->valid = FALSE; /* committed */
  
  return ret;

  signal_error:
  if (!idata->mv.buffer_elem_num) Log(LOG_DEBUG, "DEBUG ( %s/%s ): FAILED query follows:\n%s\n", config.name, config.type, sql_data); 
  else {
    if (!idata->recover || db->type != BE_TYPE_PRIMARY) {
      /* DB failure: we will rewind the multi-values buffer */
      idata->current_queue_elem = idata->mv.head_buffer_elem;
      idata->mv.buffer_elem_num = 0;
    } 
  }
  SQLI_get_errmsg(db);
  if (db->errmsg) Log(LOG_ERR, "ERROR ( %s/%s ): %s\n\n", config.name, config.type, db->errmsg);

  return ret;
}
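Note that, unlike the MySQL path, which comma-joins VALUES tuples under a single INSERT, this code batches whole statements separated by "; ". A hypothetical snapshot of the buffer just before a flush (columns and values illustrative):

/* Hypothetical contents of multi_values_buffer before sqlite3_exec() */
const char *sqli_batch =
  "INSERT INTO acct (ip_src, packets, bytes) VALUES ('10.0.0.1', 5, 320); "
  "INSERT INTO acct (ip_src, packets, bytes) VALUES ('10.0.0.2', 2, 96)";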
Example #23
/* Functions */
void pgsql_plugin(int pipe_fd, struct configuration *cfgptr, void *ptr) 
{
  struct pkt_data *data;
  struct ports_table pt;
  struct pollfd pfd;
  struct insert_data idata;
  time_t refresh_deadline;
  int timeout, refresh_timeout, amqp_timeout = 0; /* 0: AMQP reconnect timer not armed */
  int ret, num;
  struct ring *rg = &((struct channels_list_entry *)ptr)->rg;
  struct ch_status *status = ((struct channels_list_entry *)ptr)->status;
  struct plugins_list_entry *plugin_data = ((struct channels_list_entry *)ptr)->plugin;
  int datasize = ((struct channels_list_entry *)ptr)->datasize;
  u_int32_t bufsz = ((struct channels_list_entry *)ptr)->bufsize;
  struct networks_file_data nfd;
  char *dataptr;

  unsigned char *rgptr;
  int pollagain = TRUE;
  u_int32_t seq = 1, rg_err_count = 0;

  struct extra_primitives extras;
  struct primitives_ptrs prim_ptrs;

#ifdef WITH_RABBITMQ
  struct p_amqp_host *amqp_host = &((struct channels_list_entry *)ptr)->amqp_host;
#endif

  memcpy(&config, cfgptr, sizeof(struct configuration));
  memcpy(&extras, &((struct channels_list_entry *)ptr)->extras, sizeof(struct extra_primitives));
  recollect_pipe_memory(ptr);
  pm_setproctitle("%s [%s]", "PostgreSQL Plugin", config.name);

  memset(&idata, 0, sizeof(idata));
  if (config.pidfile) write_pid_file_plugin(config.pidfile, config.type, config.name);
  if (config.logfile) {
    fclose(config.logfile_fd);
    config.logfile_fd = open_logfile(config.logfile, "a");
  }

  sql_set_signals();
  sql_init_default_values(&extras);
  PG_init_default_values(&idata);
  PG_set_callbacks(&sqlfunc_cbr);
  sql_set_insert_func();

  /* some LOCAL initialization AFTER setting some default values */
  reload_map = FALSE;
  idata.now = time(NULL);
  refresh_deadline = idata.now;
  idata.cfg = &config;

  sql_init_maps(&extras, &prim_ptrs, &nt, &nc, &pt);
  sql_init_global_buffers();
  sql_init_historical_acct(idata.now, &idata);
  sql_init_triggers(idata.now, &idata);
  sql_init_refresh_deadline(&refresh_deadline);

  if (config.pipe_amqp) {
    plugin_pipe_amqp_compile_check();
#ifdef WITH_RABBITMQ
    pipe_fd = plugin_pipe_amqp_connect_to_consume(amqp_host, plugin_data);
    amqp_timeout = plugin_pipe_amqp_set_poll_timeout(amqp_host, pipe_fd);
#endif
  }
  else setnonblocking(pipe_fd);

  /* building up static SQL clauses */
  idata.num_primitives = PG_compose_static_queries();
  glob_num_primitives = idata.num_primitives; 

  /* handling logfile template stuff */
  te = sql_init_logfile_template(&th);
  INIT_BUF(logbuf);

  /* setting up environment variables */
  SQL_SetENV();

  sql_link_backend_descriptors(&bed, &p, &b);

  /* plugin main loop */
  for(;;) {
    poll_again:
    status->wakeup = TRUE;
    calc_refresh_timeout(refresh_deadline, idata.now, &refresh_timeout);

    pfd.fd = pipe_fd;
    pfd.events = POLLIN;
    timeout = MIN(refresh_timeout, (amqp_timeout ? amqp_timeout : INT_MAX));
    ret = poll(&pfd, (pfd.fd == ERR ? 0 : 1), timeout);

    if (ret <= 0) {
      if (getppid() == 1) {
        Log(LOG_ERR, "ERROR ( %s/%s ): Core process *seems* gone. Exiting.\n", config.name, config.type);
        exit_plugin(1);
      }

      if (ret < 0) goto poll_again;
    }

    idata.now = time(NULL);
    now = idata.now;

    if (config.sql_history) {
      while (idata.now > (idata.basetime + idata.timeslot)) {
        time_t saved_basetime = idata.basetime;

        idata.basetime += idata.timeslot;
        if (config.sql_history == COUNT_MONTHLY)
          idata.timeslot = calc_monthly_timeslot(idata.basetime, config.sql_history_howmany, ADD);
        glob_basetime = idata.basetime;
        idata.new_basetime = saved_basetime;
        glob_new_basetime = saved_basetime;
      }
    }

#ifdef WITH_RABBITMQ
    if (config.pipe_amqp && pipe_fd == ERR) {
      if (timeout == amqp_timeout) {
        pipe_fd = plugin_pipe_amqp_connect_to_consume(amqp_host, plugin_data);
        amqp_timeout = plugin_pipe_amqp_set_poll_timeout(amqp_host, pipe_fd);
      }
      else amqp_timeout = plugin_pipe_amqp_calc_poll_timeout_diff(amqp_host, idata.now);
    }
#endif

    switch (ret) {
    case 0: /* poll(): timeout */
      if (qq_ptr) sql_cache_flush(queries_queue, qq_ptr, &idata, FALSE);
      sql_cache_handle_flush_event(&idata, &refresh_deadline, &pt);
      break;
    default: /* poll(): received data */
      read_data:
      if (!config.pipe_amqp) {
        if (!pollagain) {
          seq++;
          seq %= MAX_SEQNUM;
          if (seq == 0) rg_err_count = FALSE;
          idata.now = time(NULL);
	  now = idata.now;
        }
        else {
          if ((ret = read(pipe_fd, &rgptr, sizeof(rgptr))) == 0)
            exit_plugin(1); /* we exit silently; something happened at the write end */
        }

        if ((rg->ptr + bufsz) > rg->end) rg->ptr = rg->base;

        if (((struct ch_buf_hdr *)rg->ptr)->seq != seq) {
          if (!pollagain) {
            pollagain = TRUE;
            goto poll_again;
          }
          else {
            rg_err_count++;
            if (config.debug || (rg_err_count > MAX_RG_COUNT_ERR)) {
              Log(LOG_ERR, "ERROR ( %s/%s ): We are missing data.\n", config.name, config.type);
              Log(LOG_ERR, "If you see this message once in a while, discard it. Otherwise some solutions follow:\n");
              Log(LOG_ERR, "- increase shared memory size, 'plugin_pipe_size'; now: '%u'.\n", config.pipe_size);
              Log(LOG_ERR, "- increase buffer size, 'plugin_buffer_size'; now: '%u'.\n", config.buffer_size);
              Log(LOG_ERR, "- increase system maximum socket size.\n\n");
            }
            seq = ((struct ch_buf_hdr *)rg->ptr)->seq;
          }
        }

        pollagain = FALSE;
        memcpy(pipebuf, rg->ptr, bufsz);
        rg->ptr += bufsz;
      }
#ifdef WITH_RABBITMQ
      else {
        ret = p_amqp_consume_binary(amqp_host, pipebuf, config.buffer_size);
        if (ret) pipe_fd = ERR;

        seq = ((struct ch_buf_hdr *)pipebuf)->seq;
        amqp_timeout = plugin_pipe_amqp_set_poll_timeout(amqp_host, pipe_fd);
      }
#endif

      /* lazy sql refresh handling */ 
      if (idata.now > refresh_deadline) {
        if (qq_ptr) sql_cache_flush(queries_queue, qq_ptr, &idata, FALSE);
        sql_cache_handle_flush_event(&idata, &refresh_deadline, &pt);
      } 
      else {
        if (config.sql_trigger_exec) {
          while (idata.now > idata.triggertime && idata.t_timeslot > 0) {
            sql_trigger_exec(config.sql_trigger_exec);
	    idata.triggertime += idata.t_timeslot;
	    if (config.sql_trigger_time == COUNT_MONTHLY)
	      idata.t_timeslot = calc_monthly_timeslot(idata.triggertime, config.sql_trigger_time_howmany, ADD);
          }
        }
      }

      data = (struct pkt_data *) (pipebuf+sizeof(struct ch_buf_hdr));
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): buffer received seq=%u num_entries=%u\n", config.name, config.type, seq, ((struct ch_buf_hdr *)pipebuf)->num);

      while (((struct ch_buf_hdr *)pipebuf)->num > 0) {
        for (num = 0; primptrs_funcs[num]; num++)
          (*primptrs_funcs[num])((u_char *)data, &extras, &prim_ptrs);

	for (num = 0; net_funcs[num]; num++)
	  (*net_funcs[num])(&nt, &nc, &data->primitives, prim_ptrs.pbgp, &nfd);

	if (config.ports_file) {
          if (!pt.table[data->primitives.src_port]) data->primitives.src_port = 0;
          if (!pt.table[data->primitives.dst_port]) data->primitives.dst_port = 0;
        }

        if (config.pkt_len_distrib_bins_str &&
            config.what_to_count_2 & COUNT_PKT_LEN_DISTRIB)
          evaluate_pkt_len_distrib(data);

        prim_ptrs.data = data;
        (*insert_func)(&prim_ptrs, &idata);

        ((struct ch_buf_hdr *)pipebuf)->num--;
        if (((struct ch_buf_hdr *)pipebuf)->num) {
          dataptr = (unsigned char *) data;
          if (!prim_ptrs.vlen_next_off) dataptr += datasize;
          else dataptr += prim_ptrs.vlen_next_off;
          data = (struct pkt_data *) dataptr;
	}
      }

      if (!config.pipe_amqp) goto read_data;
    }
  }
}
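A small sketch of the dual-deadline poll timeout computed above; the function name is a hypothetical wrapper around the MIN() expression, and a zero AMQP timeout means that timer is not armed:

#include <limits.h>

/* Pick the nearest of the two deadlines for poll() */
static int calc_poll_timeout(int refresh_timeout, int amqp_timeout)
{
  int t = (amqp_timeout ? amqp_timeout : INT_MAX); /* 0 = not armed */
  return (refresh_timeout < t ? refresh_timeout : t);
}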
Example #24
/* Functions */
void sqlite3_plugin(int pipe_fd, struct configuration *cfgptr, void *ptr) 
{
  struct pkt_data *data;
  struct ports_table pt;
  struct pollfd pfd;
  struct insert_data idata;
  struct timezone tz;
  time_t refresh_deadline;
  int timeout;
  int ret, num;
  struct ring *rg = &((struct channels_list_entry *)ptr)->rg;
  struct ch_status *status = ((struct channels_list_entry *)ptr)->status;
  u_int32_t bufsz = ((struct channels_list_entry *)ptr)->bufsize;
  struct pkt_bgp_primitives *pbgp;
  char *dataptr;

  unsigned char *rgptr;
  int pollagain = TRUE;
  u_int32_t seq = 1, rg_err_count = 0; 

  memcpy(&config, cfgptr, sizeof(struct configuration));
  recollect_pipe_memory(ptr);
  pm_setproctitle("%s [%s]", "SQLite3 Plugin", config.name);
  memset(&idata, 0, sizeof(idata));
  if (config.pidfile) write_pid_file_plugin(config.pidfile, config.type, config.name);
  if (config.logfile) {
    fclose(config.logfile_fd);
    config.logfile_fd = open_logfile(config.logfile);
  }

  sql_set_signals();
  sql_init_default_values();
  SQLI_init_default_values(&idata);
  SQLI_set_callbacks(&sqlfunc_cbr);
  sql_set_insert_func();

  /* some LOCAL initialization AFTER setting some default values */
  reload_map = FALSE;
  idata.now = time(NULL);
  refresh_deadline = idata.now;

  sql_init_maps(&nt, &nc, &pt);
  sql_init_global_buffers();
  sql_init_pipe(&pfd, pipe_fd);
  sql_init_historical_acct(idata.now, &idata);
  sql_init_triggers(idata.now, &idata);
  sql_init_refresh_deadline(&refresh_deadline);

  /* setting number of entries in _protocols structure */
  while (_protocols[protocols_number].number != -1) protocols_number++;

  /* building up static SQL clauses */
  idata.num_primitives = SQLI_compose_static_queries();
  glob_num_primitives = idata.num_primitives; 

  /* handling purge preprocessor */
  set_preprocess_funcs(config.sql_preprocess, &prep); 

  /* setting up environment variables */
  SQL_SetENV();

  sql_link_backend_descriptors(&bed, &p, &b);

  /* plugin main loop */
  for(;;) {
    poll_again:
    status->wakeup = TRUE;
    sql_calc_refresh_timeout(refresh_deadline, idata.now, &timeout);
    ret = poll(&pfd, 1, timeout);
    if (ret < 0) goto poll_again;

    idata.now = time(NULL);

    if (config.sql_history) {
      while (idata.now > (idata.basetime + idata.timeslot)) {
        time_t saved_basetime = idata.basetime;

        idata.basetime += idata.timeslot;
        if (config.sql_history == COUNT_MONTHLY)
          idata.timeslot = calc_monthly_timeslot(idata.basetime, config.sql_history_howmany, ADD);
        glob_basetime = idata.basetime;
        idata.new_basetime = saved_basetime;
        glob_new_basetime = saved_basetime;
      }
    }

    switch (ret) {
    case 0: /* timeout */
      if (qq_ptr) sql_cache_flush(queries_queue, qq_ptr, &idata, FALSE);
      switch (fork()) {
      case 0: /* Child */
	/* we have to ignore signals to avoid loops:
	   because we are already forked */
	signal(SIGINT, SIG_IGN);
	signal(SIGHUP, SIG_IGN);
	pm_setproctitle("%s [%s]", "SQLite3 Plugin -- DB Writer", config.name);

        if (qq_ptr && sql_writers.flags != CHLD_ALERT) {
	  if (sql_writers.flags == CHLD_WARNING) sql_db_fail(&p);
          (*sqlfunc_cbr.connect)(&p, NULL); 
          (*sqlfunc_cbr.purge)(queries_queue, qq_ptr, &idata);
	  (*sqlfunc_cbr.close)(&bed);
	}

	if (config.sql_trigger_exec) {
	  if (idata.now > idata.triggertime) sql_trigger_exec(config.sql_trigger_exec);
	}
	  
        exit(0);
      default: /* Parent */
	if (pqq_ptr) sql_cache_flush_pending(pending_queries_queue, pqq_ptr, &idata);
	gettimeofday(&idata.flushtime, &tz);
	while (idata.now > refresh_deadline)
	  refresh_deadline += config.sql_refresh_time; 
	while (idata.now > idata.triggertime && idata.t_timeslot > 0) {
	  idata.triggertime  += idata.t_timeslot;
	  if (config.sql_trigger_time == COUNT_MONTHLY)
	    idata.t_timeslot = calc_monthly_timeslot(idata.triggertime, config.sql_trigger_time_howmany, ADD);
	}
	idata.new_basetime = FALSE;
	glob_new_basetime = FALSE;
	qq_ptr = pqq_ptr;
	memcpy(queries_queue, pending_queries_queue, qq_ptr*sizeof(struct db_cache *));

	if (reload_map) {
	  load_networks(config.networks_file, &nt, &nc);
	  load_ports(config.ports_file, &pt);
	  reload_map = FALSE;
	}
        break;
      }
      break;
    default: /* we received data */
      read_data:
      if (!pollagain) {
        seq++;
        seq %= MAX_SEQNUM;
	if (seq == 0) rg_err_count = FALSE;
	idata.now = time(NULL); 
      }
      else {
        if ((ret = read(pipe_fd, &rgptr, sizeof(rgptr))) == 0) 
	  exit_plugin(1); /* we exit silently; something happened at the write end */
      }

      if (((struct ch_buf_hdr *)rg->ptr)->seq != seq) {
	if (!pollagain) {
	  pollagain = TRUE;
	  goto poll_again;
        }
	else {
	  rg_err_count++;
	  if (config.debug || (rg_err_count > MAX_RG_COUNT_ERR)) {
            Log(LOG_ERR, "ERROR ( %s/%s ): We are missing data.\n", config.name, config.type);
            Log(LOG_ERR, "If you see this message once in a while, discard it. Otherwise some solutions follow:\n");
            Log(LOG_ERR, "- increase shared memory size, 'plugin_pipe_size'; now: '%u'.\n", config.pipe_size);
            Log(LOG_ERR, "- increase buffer size, 'plugin_buffer_size'; now: '%u'.\n", config.buffer_size);
            Log(LOG_ERR, "- increase system maximum socket size.\n\n");
	  }
          seq = ((struct ch_buf_hdr *)rg->ptr)->seq;
	}
      }

      pollagain = FALSE;
      memcpy(pipebuf, rg->ptr, bufsz);
      if ((rg->ptr+bufsz) >= rg->end) rg->ptr = rg->base;
      else rg->ptr += bufsz;

      /* lazy sql refresh handling */ 
      if (idata.now > refresh_deadline) {
        if (qq_ptr) sql_cache_flush(queries_queue, qq_ptr, &idata, FALSE);
        switch (fork()) {
        case 0: /* Child */
          /* we have to ignore signals to avoid loops:
             because we are already forked */
          signal(SIGINT, SIG_IGN);
          signal(SIGHUP, SIG_IGN);
	  pm_setproctitle("%s [%s]", "SQLite3 Plugin -- DB Writer", config.name);

          if (qq_ptr && sql_writers.flags != CHLD_ALERT) {
	    if (sql_writers.flags == CHLD_WARNING) sql_db_fail(&p);
            (*sqlfunc_cbr.connect)(&p, NULL);
            (*sqlfunc_cbr.purge)(queries_queue, qq_ptr, &idata);
	    (*sqlfunc_cbr.close)(&bed);
	  }

	  if (config.sql_trigger_exec) {
	    if (idata.now > idata.triggertime) sql_trigger_exec(config.sql_trigger_exec);
	  }

          exit(0);
        default: /* Parent */
	  if (pqq_ptr) sql_cache_flush_pending(pending_queries_queue, pqq_ptr, &idata);
	  gettimeofday(&idata.flushtime, &tz);
	  while (idata.now > refresh_deadline)
	    refresh_deadline += config.sql_refresh_time; 
          while (idata.now > idata.triggertime && idata.t_timeslot > 0) {
	    idata.triggertime  += idata.t_timeslot;
	    if (config.sql_trigger_time == COUNT_MONTHLY)
	      idata.t_timeslot = calc_monthly_timeslot(idata.triggertime, config.sql_trigger_time_howmany, ADD);
	  }
	  idata.new_basetime = FALSE;
	  glob_new_basetime = FALSE;
	  qq_ptr = pqq_ptr;
	  memcpy(queries_queue, pending_queries_queue, qq_ptr*sizeof(struct db_cache *));

	  if (reload_map) {
	    load_networks(config.networks_file, &nt, &nc);
	    load_ports(config.ports_file, &pt);
	    reload_map = FALSE;
	  }
          break;
        }
      } 
      else {
	if (config.sql_trigger_exec) {
	  while (idata.now > idata.triggertime && idata.t_timeslot > 0) {
	    sql_trigger_exec(config.sql_trigger_exec); 
	    idata.triggertime += idata.t_timeslot;
	    if (config.sql_trigger_time == COUNT_MONTHLY)
	      idata.t_timeslot = calc_monthly_timeslot(idata.triggertime, config.sql_trigger_time_howmany, ADD);
	  }
	}
      }

      data = (struct pkt_data *) (pipebuf+sizeof(struct ch_buf_hdr));

      while (((struct ch_buf_hdr *)pipebuf)->num) {
	for (num = 0; net_funcs[num]; num++)
	  (*net_funcs[num])(&nt, &nc, &data->primitives);

	if (config.ports_file) {
	  if (!pt.table[data->primitives.src_port]) data->primitives.src_port = 0;
	  if (!pt.table[data->primitives.dst_port]) data->primitives.dst_port = 0;
	}

        if (PbgpSz) pbgp = (struct pkt_bgp_primitives *) ((u_char *)data+PdataSz);
        else pbgp = NULL;

        (*insert_func)(data, pbgp, &idata);

        ((struct ch_buf_hdr *)pipebuf)->num--;
        if (((struct ch_buf_hdr *)pipebuf)->num) {
          dataptr = (unsigned char *) data;
          dataptr += PdataSz + PbgpSz;
          data = (struct pkt_data *) dataptr;
        }
      }
      goto read_data;
    }
  }
}
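A minimal sketch of the fork-per-flush writer pattern used by both purge branches above; flush_async() is a hypothetical distillation, and error handling beyond the fork() fallback is omitted:

#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

/* Hand the purge off to a short-lived child so the collection loop
   never blocks on the database. */
void flush_async(void (*purge)(void))
{
  switch (fork()) {
  case 0:                   /* child: ignore reload signals, write, exit */
    signal(SIGINT, SIG_IGN);
    signal(SIGHUP, SIG_IGN);
    purge();
    exit(0);
  case -1:                  /* fork failed: purge inline as a fallback */
    purge();
    break;
  default:                  /* parent returns at once; the child is
                               reaped elsewhere (e.g. a SIGCHLD handler) */
    break;
  }
}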
Example #25
int PG_compose_static_queries()
{
  int primitives=0, set_primitives=0, set_event_primitives=0, have_flows=0, lock=0;
  char default_delim[] = ",", delim_buf[SRVBUFLEN];

  if (config.what_to_count & COUNT_FLOWS || (config.sql_table_version >= 4 &&
                                             config.sql_table_version < SQL_TABLE_VERSION_BGP &&
                                             !config.sql_optimize_clauses)) {
    config.what_to_count |= COUNT_FLOWS;
    have_flows = TRUE;

    if ((config.sql_table_version < 4 || config.sql_table_version >= SQL_TABLE_VERSION_BGP) && !config.sql_optimize_clauses) {
      Log(LOG_ERR, "ERROR ( %s/%s ): The accounting of flows requires SQL table v4. Exiting.\n", config.name, config.type);
      exit_plugin(1);
    }
  }

  /* "INSERT INTO ... VALUES ... ", "COPY ... ", "... WHERE ..." stuff */
  strncpy(where[primitives].string, " WHERE ", sizeof(where[primitives].string));
  snprintf(copy_clause, sizeof(copy_clause), "COPY %s (", config.sql_table);
  snprintf(insert_clause, sizeof(insert_clause), "INSERT INTO %s (", config.sql_table);
  strncpy(values[primitives].string, " VALUES (", sizeof(values[primitives].string));
  primitives = PG_evaluate_history(primitives);
  primitives = sql_evaluate_primitives(primitives);

  strncat(copy_clause, ", packets, bytes", SPACELEFT(copy_clause));
  if (have_flows) strncat(copy_clause, ", flows", SPACELEFT(copy_clause));

  if (!config.sql_delimiter || !config.sql_use_copy)
    snprintf(delim_buf, SRVBUFLEN, ") FROM STDIN DELIMITER \'%s\'", default_delim);
  else
    snprintf(delim_buf, SRVBUFLEN, ") FROM STDIN DELIMITER \'%s\'", config.sql_delimiter);
  strncat(copy_clause, delim_buf, SPACELEFT(copy_clause));

  strncpy(insert_counters_clause, ", packets, bytes", SPACELEFT(insert_counters_clause));
  if (have_flows) strncat(insert_counters_clause, ", flows", SPACELEFT(insert_counters_clause));
  strncat(insert_counters_clause, ")", SPACELEFT(insert_counters_clause));
  strncpy(insert_nocounters_clause, ")", SPACELEFT(insert_nocounters_clause));

  /* "LOCK ..." stuff */
  
  if (config.sql_dont_try_update) snprintf(lock_clause, sizeof(lock_clause), "BEGIN;");
  else {
    if (config.sql_locking_style) lock = sql_select_locking_style(config.sql_locking_style); 
    switch (lock) {
    case PM_LOCK_NONE:
      snprintf(lock_clause, sizeof(lock_clause), "BEGIN;");
      break;
    case PM_LOCK_ROW_EXCLUSIVE:
      snprintf(lock_clause, sizeof(lock_clause), "BEGIN; LOCK %s IN ROW EXCLUSIVE MODE;", config.sql_table);
      break;
    case PM_LOCK_EXCLUSIVE:
    default:
      snprintf(lock_clause, sizeof(lock_clause), "BEGIN; LOCK %s IN EXCLUSIVE MODE;", config.sql_table);
      break;
    }
  }

  /* "UPDATE ... SET ..." stuff */
  snprintf(update_clause, sizeof(update_clause), "UPDATE %s ", config.sql_table);

  set_primitives = sql_compose_static_set(have_flows);
  set_event_primitives = sql_compose_static_set_event();

  if (config.sql_history) {
    if (!config.sql_history_since_epoch) {
      strncpy(set[set_primitives].string, ", ", SPACELEFT(set[set_primitives].string));
      strncat(set[set_primitives].string, "stamp_updated=CURRENT_TIMESTAMP(0)", SPACELEFT(set[set_primitives].string)); 
      set[set_primitives].type = TIMESTAMP;
      set[set_primitives].handler = count_noop_setclause_handler;
      set_primitives++;

      if (set_event_primitives) strncpy(set_event[set_event_primitives].string, ", ", SPACELEFT(set_event[set_event_primitives].string));
      else strncpy(set_event[set_event_primitives].string, "SET ", SPACELEFT(set_event[set_event_primitives].string));
      strncat(set_event[set_event_primitives].string, "stamp_updated=CURRENT_TIMESTAMP(0)", SPACELEFT(set_event[set_event_primitives].string));
      set_event[set_event_primitives].type = TIMESTAMP;
      set_event[set_event_primitives].handler = count_noop_setclause_handler;
      set_event_primitives++;
    }
    else {
      strncpy(set[set_primitives].string, ", ", SPACELEFT(set[set_primitives].string));
      strncat(set[set_primitives].string, "stamp_updated=DATE_PART('epoch',NOW())::BIGINT", SPACELEFT(set[set_primitives].string));
      set[set_primitives].type = TIMESTAMP;
      set[set_primitives].handler = count_noop_setclause_handler;
      set_primitives++;

      if (set_event_primitives) strncpy(set_event[set_event_primitives].string, ", ", SPACELEFT(set_event[set_event_primitives].string));
      else strncpy(set_event[set_event_primitives].string, "SET ", SPACELEFT(set_event[set_event_primitives].string));
      strncat(set_event[set_event_primitives].string, "stamp_updated=DATE_PART('epoch',NOW())::BIGINT", SPACELEFT(set_event[set_event_primitives].string));
      set_event[set_event_primitives].type = TIMESTAMP;
      set_event[set_event_primitives].handler = count_noop_setclause_handler;
      set_event_primitives++;
    }
  }

  /* values for COPY */
  memcpy(&copy_values, &values, sizeof(copy_values));
  {
    int num, x, y;
    char *ptr;

    ptr = strchr(copy_values[0].string, '(');
    ptr++;
    /* source and destination overlap: memmove() is required here */
    memmove(copy_values[0].string, ptr, strlen(ptr) + 1);

    for (num = 0; num < primitives; num++) {
      for (x = 0; copy_values[num].string[x] != '\0'; x++) {
	if (copy_values[num].string[x] == ' ' || copy_values[num].string[x] == '\'') {
	  for (y = x + 1; copy_values[num].string[y] != '\0'; y++)
            copy_values[num].string[y-1] = copy_values[num].string[y];
          copy_values[num].string[y-1] = '\0';
          x--;
        }
      }
      copy_values[num].string[x] = '\0';
    }
  }

  return primitives;
}
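A hedged illustration of the COPY skeleton built above and a matching data row; the column list is illustrative, and the delimiter defaults to ',' unless sql_delimiter is set with sql_use_copy:

/* Illustrative only: actual columns depend on sql_evaluate_primitives() */
const char *copy_skeleton =
  "COPY acct (ip_src, ip_dst, packets, bytes, flows) FROM STDIN DELIMITER ','";
/* A matching data row streamed after the COPY: */
const char *row_example = "10.0.0.1,10.0.0.2,12,3456,1\n";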
Example #26
int SQLI_compose_static_queries()
{
  int primitives=0, set_primitives=0, have_flows=0;

  if (config.what_to_count & COUNT_FLOWS || (config.sql_table_version >= 4 &&
                                             config.sql_table_version < SQL_TABLE_VERSION_BGP &&
                                             !config.sql_optimize_clauses)) {
    config.what_to_count |= COUNT_FLOWS;
    have_flows = TRUE;

    if ((config.sql_table_version < 4 || config.sql_table_version >= SQL_TABLE_VERSION_BGP) && !config.sql_optimize_clauses) {
      Log(LOG_ERR, "ERROR ( %s/%s ): The accounting of flows requires SQL table v4. Exiting.\n", config.name, config.type);
      exit_plugin(1);
    }
  }

  /* "INSERT INTO ... VALUES ... " and "... WHERE ..." stuff */
  strncpy(where[primitives].string, " WHERE ", sizeof(where[primitives].string));
  snprintf(insert_clause, sizeof(insert_clause), "INSERT INTO %s (", config.sql_table);
  strncpy(values[primitives].string, " VALUES (", sizeof(values[primitives].string));
  primitives = SQLI_evaluate_history(primitives);
  primitives = sql_evaluate_primitives(primitives);
  strncat(insert_clause, ", packets, bytes", SPACELEFT(insert_clause));
  if (have_flows) strncat(insert_clause, ", flows", SPACELEFT(insert_clause));
  strncat(insert_clause, ")", SPACELEFT(insert_clause));

  /* "LOCK ..." stuff */
  if (config.sql_locking_style) Log(LOG_WARNING, "WARN ( %s/%s ): sql_locking_style is not supported. Ignored.\n", config.name, config.type);
  snprintf(lock_clause, sizeof(lock_clause), "BEGIN", config.sql_table);
  strncpy(unlock_clause, "COMMIT", sizeof(unlock_clause));

  /* "UPDATE ... SET ..." stuff */
  snprintf(update_clause, sizeof(update_clause), "UPDATE %s ", config.sql_table);

  set_primitives = sql_compose_static_set(have_flows);

  if (config.sql_history || config.nfacctd_sql_log) {
    if (!config.nfacctd_sql_log) {
      if (!config.sql_history_since_epoch) {
	strncpy(set[set_primitives].string, ", ", SPACELEFT(set[set_primitives].string));
	strncat(set[set_primitives].string, "stamp_updated=DATETIME('now', 'localtime')", SPACELEFT(set[set_primitives].string));
	set[set_primitives].type = TIMESTAMP;
	set[set_primitives].handler = count_noop_setclause_handler;
	set_primitives++;
      }
      else {
	strncpy(set[set_primitives].string, ", ", SPACELEFT(set[set_primitives].string));
	strncat(set[set_primitives].string, "stamp_updated=STRFTIME('%%s', 'now')", SPACELEFT(set[set_primitives].string));
	set[set_primitives].type = TIMESTAMP;
	set[set_primitives].handler = count_noop_setclause_handler;
	set_primitives++;
      }
    }
    else {
      if (!config.sql_history_since_epoch) {
	strncpy(set[set_primitives].string, ", ", SPACELEFT(set[set_primitives].string));
	strncat(set[set_primitives].string, "stamp_updated=DATETIME(%u, 'unixepoch', 'localtime')", SPACELEFT(set[set_primitives].string));
	set[set_primitives].type = TIMESTAMP;
	set[set_primitives].handler = count_timestamp_setclause_handler;
	set_primitives++;
      }
      else {
	strncpy(set[set_primitives].string, ", ", SPACELEFT(set[set_primitives].string));
	strncat(set[set_primitives].string, "stamp_updated=%u", SPACELEFT(set[set_primitives].string));
	set[set_primitives].type = TIMESTAMP;
	set[set_primitives].handler = count_timestamp_setclause_handler;
	set_primitives++;
      }
    }
  }

  return primitives;
}
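For reference, the two SQLite timestamp expressions selected above, shown as they would appear in a generated UPDATE; the table name, counters and WHERE clause are illustrative:

/* sql_history_since_epoch unset: local timestamp string */
const char *upd_local =
  "UPDATE acct SET packets=packets+1, bytes=bytes+64, "
  "stamp_updated=DATETIME('now', 'localtime') WHERE stamp_inserted='...'";
/* sql_history_since_epoch set: seconds since the epoch */
const char *upd_epoch =
  "UPDATE acct SET packets=packets+1, bytes=bytes+64, "
  "stamp_updated=STRFTIME('%s', 'now') WHERE stamp_inserted='...'";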
Example #27
void PG_init_default_values(struct insert_data *idata)
{
  /* Linking database parameters */
  if (!config.sql_data) config.sql_data = typed_str;
  if (!config.sql_user) config.sql_user = pgsql_user;
  if (!config.sql_db) config.sql_db = pgsql_db;
  if (!config.sql_passwd) config.sql_passwd = pgsql_pwd;
  if (!config.sql_table) {
    /* checking 'typed' table constraints */
    if (!strcmp(config.sql_data, "typed")) {
      if (config.what_to_count & (COUNT_SRC_AS|COUNT_SUM_AS|COUNT_DST_AS) && config.what_to_count &
	(COUNT_SRC_HOST|COUNT_SUM_HOST|COUNT_DST_HOST|COUNT_SRC_NET|COUNT_SUM_NET|COUNT_DST_NET) &&
	config.sql_table_version < 6) {
	Log(LOG_ERR, "ERROR ( %s/%s ): 'typed' PostgreSQL table in use: unable to mix HOST/NET and AS aggregations.\n", config.name, config.type);
	exit_plugin(1);
      }
      typed = TRUE;
    }
    else if (!strcmp(config.sql_data, "unified")) typed = FALSE;
    else {
      Log(LOG_ERR, "ERROR ( %s/%s ): Ignoring unknown 'sql_data' value '%s'.\n", config.name, config.type, config.sql_data);
      exit_plugin(1);
    }

    if (typed) {
      if (config.sql_table_version == (SQL_TABLE_VERSION_BGP+1)) config.sql_table = pgsql_table_bgp;
      else if (config.sql_table_version == 8) config.sql_table = pgsql_table_v8;
      else if (config.sql_table_version == 7) config.sql_table = pgsql_table_v7;
      else if (config.sql_table_version == 6) config.sql_table = pgsql_table_v6; 
      else if (config.sql_table_version == 5) {
        if (config.what_to_count & (COUNT_SRC_AS|COUNT_DST_AS|COUNT_SUM_AS)) config.sql_table = pgsql_table_as_v5;
        else config.sql_table = pgsql_table_v5;
      }
      else if (config.sql_table_version == 4) {
	if (config.what_to_count & (COUNT_SRC_AS|COUNT_DST_AS|COUNT_SUM_AS)) config.sql_table = pgsql_table_as_v4;
	else config.sql_table = pgsql_table_v4;
      }
      else if (config.sql_table_version == 3) {
	if (config.what_to_count & (COUNT_SRC_AS|COUNT_DST_AS|COUNT_SUM_AS)) config.sql_table = pgsql_table_as_v3;
	else config.sql_table = pgsql_table_v3;
      }
      else if (config.sql_table_version == 2) {
	if (config.what_to_count & (COUNT_SRC_AS|COUNT_DST_AS|COUNT_SUM_AS)) config.sql_table = pgsql_table_as_v2;
	else config.sql_table = pgsql_table_v2;
      }
      else {
	if (config.what_to_count & (COUNT_SRC_AS|COUNT_DST_AS|COUNT_SUM_AS)) config.sql_table = pgsql_table_as;
	else config.sql_table = pgsql_table;
      }
    }
    else {
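      /* 'unified' layouts were discontinued from v6 onward: warn and fall
         back to the corresponding typed table. */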
      if (config.sql_table_version == 8) {
        Log(LOG_WARNING, "WARN ( %s/%s ): Unified data are no longer supported. Switching to typed data.\n", config.name, config.type);
        config.sql_table = pgsql_table_v8;
      }
      else if (config.sql_table_version == 7) {
	Log(LOG_WARNING, "WARN ( %s/%s ): Unified data are no longer supported. Switching to typed data.\n", config.name, config.type);
	config.sql_table = pgsql_table_v7;
      }
      else if (config.sql_table_version == 6) {
	Log(LOG_WARNING, "WARN ( %s/%s ): Unified data are no longer supported. Switching to typed data.\n", config.name, config.type);
	config.sql_table = pgsql_table_v6;
      }
      else if (config.sql_table_version == 5) config.sql_table = pgsql_table_uni_v5;
      else if (config.sql_table_version == 4) config.sql_table = pgsql_table_uni_v4;
      else if (config.sql_table_version == 3) config.sql_table = pgsql_table_uni_v3;
      else if (config.sql_table_version == 2) config.sql_table = pgsql_table_uni_v2;
      else config.sql_table = pgsql_table_uni;
    }
  }
  if (strchr(config.sql_table, '%') || strchr(config.sql_table, '$')) idata->dyn_table = TRUE;
  glob_dyn_table = idata->dyn_table;

  if (config.sql_backup_host || config.sql_recovery_logfile) idata->recover = TRUE;
  if (!config.sql_dont_try_update && config.sql_use_copy) config.sql_use_copy = FALSE; 

  if (config.sql_locking_style) idata->locks = sql_select_locking_style(config.sql_locking_style);
}
Example #28
void Tee_exit_now(int signum)
{
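  /* Reap any terminated child before exiting so no zombies are left behind. */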
  wait(NULL);
  exit_plugin(0);
}
Example #29
/* Functions */
void kafka_plugin(int pipe_fd, struct configuration *cfgptr, void *ptr)
{
  struct pkt_data *data;
  struct ports_table pt;
  unsigned char *pipebuf;
  struct pollfd pfd;
  struct insert_data idata;
  time_t t, avro_schema_deadline = 0;
  int timeout, refresh_timeout, avro_schema_timeout = 0;
  int ret, num; 
  struct ring *rg = &((struct channels_list_entry *)ptr)->rg;
  struct ch_status *status = ((struct channels_list_entry *)ptr)->status;
  struct plugins_list_entry *plugin_data = ((struct channels_list_entry *)ptr)->plugin;
  int datasize = ((struct channels_list_entry *)ptr)->datasize;
  u_int32_t bufsz = ((struct channels_list_entry *)ptr)->bufsize;
  pid_t core_pid = ((struct channels_list_entry *)ptr)->core_pid;
  struct networks_file_data nfd;

  unsigned char *rgptr;
  int pollagain = TRUE;
  u_int32_t seq = 1, rg_err_count = 0;

  struct extra_primitives extras;
  struct primitives_ptrs prim_ptrs;
  char *dataptr;

#ifdef WITH_AVRO
  char *avro_acct_schema_str;
#endif

#ifdef WITH_ZMQ
  struct p_zmq_host *zmq_host = &((struct channels_list_entry *)ptr)->zmq_host;
#endif

  memcpy(&config, cfgptr, sizeof(struct configuration));
  memcpy(&extras, &((struct channels_list_entry *)ptr)->extras, sizeof(struct extra_primitives));
  recollect_pipe_memory(ptr);
  pm_setproctitle("%s [%s]", "Kafka Plugin", config.name);

  P_set_signals();
  P_init_default_values();
  P_config_checks();
  pipebuf = (unsigned char *) pm_malloc(config.buffer_size);
  memset(pipebuf, 0, config.buffer_size);

  timeout = config.sql_refresh_time*1000;

  if (!config.message_broker_output) config.message_broker_output = PRINT_OUTPUT_JSON;

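  /* Choose the output encoder up-front: pre-compose the JSON field list,
     or build the Avro schema and, if requested, arrange its periodic
     publication to a dedicated schema topic. */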
  if (config.message_broker_output & PRINT_OUTPUT_JSON) {
    compose_json(config.what_to_count, config.what_to_count_2);
  }
  else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
    avro_acct_schema = build_avro_schema(config.what_to_count, config.what_to_count_2);
    avro_schema_add_writer_id(avro_acct_schema);

    if (config.avro_schema_output_file) write_avro_schema_to_file(config.avro_schema_output_file, avro_acct_schema);

    if (config.kafka_avro_schema_topic) {
      if (!config.kafka_avro_schema_refresh_time)
	config.kafka_avro_schema_refresh_time = DEFAULT_AVRO_SCHEMA_REFRESH_TIME;

      avro_schema_deadline = time(NULL);
      P_init_refresh_deadline(&avro_schema_deadline, config.kafka_avro_schema_refresh_time, 0, "m");
      avro_acct_schema_str = compose_avro_purge_schema(avro_acct_schema, config.name);
    }
    else {
      config.kafka_avro_schema_refresh_time = 0;
      avro_schema_deadline = 0;
      avro_schema_timeout = 0;
      avro_acct_schema_str = NULL;
    }
#endif
  }

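  /* The kafka plugin reuses the generic sql_* config slots: sql_table holds
     kafka_topic, sql_multi_values holds kafka_multi_values and
     amqp_routing_key_rr holds kafka_topic_rr; hence the checks below. */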
  if ((config.sql_table && strchr(config.sql_table, '$')) && config.sql_multi_values) {
    Log(LOG_ERR, "ERROR ( %s/%s ): dynamic 'kafka_topic' is not compatible with 'kafka_multi_values'. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  if ((config.sql_table && strchr(config.sql_table, '$')) && config.amqp_routing_key_rr) {
    Log(LOG_ERR, "ERROR ( %s/%s ): dynamic 'kafka_topic' is not compatible with 'kafka_topic_rr'. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  /* setting function pointers */
  if (config.what_to_count & (COUNT_SUM_HOST|COUNT_SUM_NET))
    insert_func = P_sum_host_insert;
  else if (config.what_to_count & COUNT_SUM_PORT) insert_func = P_sum_port_insert;
  else if (config.what_to_count & COUNT_SUM_AS) insert_func = P_sum_as_insert;
#if defined (HAVE_L2)
  else if (config.what_to_count & COUNT_SUM_MAC) insert_func = P_sum_mac_insert;
#endif
  else insert_func = P_cache_insert;
  purge_func = kafka_cache_purge;

  memset(&nt, 0, sizeof(nt));
  memset(&nc, 0, sizeof(nc));
  memset(&pt, 0, sizeof(pt));

  load_networks(config.networks_file, &nt, &nc);
  set_net_funcs(&nt);

  if (config.ports_file) load_ports(config.ports_file, &pt);
  if (config.pkt_len_distrib_bins_str) load_pkt_len_distrib_bins();
  else {
    if (config.what_to_count_2 & COUNT_PKT_LEN_DISTRIB) {
      Log(LOG_ERR, "ERROR ( %s/%s ): 'aggregate' contains pkt_len_distrib but no 'pkt_len_distrib_bins' defined. Exiting.\n", config.name, config.type);
      exit_plugin(1);
    }
  }
  
  memset(&idata, 0, sizeof(idata));
  memset(&prim_ptrs, 0, sizeof(prim_ptrs));
  set_primptrs_funcs(&extras);

  if (config.pipe_zmq) {
    plugin_pipe_zmq_compile_check();
#ifdef WITH_ZMQ
    p_zmq_plugin_pipe_init_plugin(zmq_host);
    p_zmq_plugin_pipe_consume(zmq_host);
    p_zmq_set_retry_timeout(zmq_host, config.pipe_zmq_retry);
    pipe_fd = p_zmq_get_fd(zmq_host);
    seq = 0;
#endif
  }
  else setnonblocking(pipe_fd);

  idata.now = time(NULL);

  /* print_refresh time init: deadline */
  refresh_deadline = idata.now; 
  P_init_refresh_deadline(&refresh_deadline, config.sql_refresh_time, config.sql_startup_delay, config.sql_history_roundoff);

  if (config.sql_history) {
    basetime_init = P_init_historical_acct;
    basetime_eval = P_eval_historical_acct;
    basetime_cmp = P_cmp_historical_acct;

    (*basetime_init)(idata.now);
  }

  /* setting number of entries in _protocols structure */
  while (_protocols[protocols_number].number != -1) protocols_number++;

  /* plugin main loop */
  for(;;) {
    poll_again:
    status->wakeup = TRUE;
    calc_refresh_timeout(refresh_deadline, idata.now, &refresh_timeout);
    if (config.kafka_avro_schema_topic) calc_refresh_timeout(avro_schema_deadline, idata.now, &avro_schema_timeout);

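    /* Sleep until whichever deadline expires first: the cache flush or,
       when enabled, the Avro schema re-publication. */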
    pfd.fd = pipe_fd;
    pfd.events = POLLIN;
    timeout = MIN(refresh_timeout, (avro_schema_timeout ? avro_schema_timeout : INT_MAX));
    ret = poll(&pfd, (pfd.fd == ERR ? 0 : 1), timeout);

    if (ret <= 0) {
      if (getppid() == 1) {
        Log(LOG_ERR, "ERROR ( %s/%s ): Core process *seems* gone. Exiting.\n", config.name, config.type);
        exit_plugin(1);
      }

      if (ret < 0) goto poll_again;
    }

    idata.now = time(NULL);

    if (config.sql_history) {
      while (idata.now > (basetime.tv_sec + timeslot)) {
	new_basetime.tv_sec = basetime.tv_sec;
        basetime.tv_sec += timeslot;
        if (config.sql_history == COUNT_MONTHLY)
          timeslot = calc_monthly_timeslot(basetime.tv_sec, config.sql_history_howmany, ADD);
      }
    }

#ifdef WITH_AVRO
    if (config.kafka_avro_schema_topic && idata.now > avro_schema_deadline) {
      kafka_avro_schema_purge(avro_acct_schema_str);
      avro_schema_deadline += config.kafka_avro_schema_refresh_time;
    }
#endif

    switch (ret) {
    case 0: /* timeout */
      if (idata.now > refresh_deadline) P_cache_handle_flush_event(&pt);
      break;
    default: /* we received data */
      read_data:
      if (config.pipe_homegrown) {
        if (!pollagain) {
          seq++;
          seq %= MAX_SEQNUM;
          if (seq == 0) rg_err_count = FALSE;
        }
        else {
          if ((ret = read(pipe_fd, &rgptr, sizeof(rgptr))) == 0) 
	    exit_plugin(1); /* we exit silently; something happened at the write end */
        }

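        /* Home-grown pipe: wrap the ring read pointer, then check the
           producer's sequence number; a mismatch after a fresh poll means
           buffers were overwritten before we could read them. */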
        if ((rg->ptr + bufsz) > rg->end) rg->ptr = rg->base;

        if (((struct ch_buf_hdr *)rg->ptr)->seq != seq) {
          if (!pollagain) {
            pollagain = TRUE;
            goto poll_again;
          }
          else {
            rg_err_count++;
            if (config.debug || (rg_err_count > MAX_RG_COUNT_ERR)) {
              Log(LOG_WARNING, "WARN ( %s/%s ): Missing data detected (plugin_buffer_size=%llu plugin_pipe_size=%llu).\n",
                        config.name, config.type, config.buffer_size, config.pipe_size);
              Log(LOG_WARNING, "WARN ( %s/%s ): Increase values or look for plugin_buffer_size, plugin_pipe_size in CONFIG-KEYS document.\n\n",
                        config.name, config.type);
            }

	    rg->ptr = (rg->base + status->last_buf_off);
            seq = ((struct ch_buf_hdr *)rg->ptr)->seq;
          }
        }

        pollagain = FALSE;
        memcpy(pipebuf, rg->ptr, bufsz);
        rg->ptr += bufsz;
      }
#ifdef WITH_ZMQ
      else if (config.pipe_zmq) {
	ret = p_zmq_plugin_pipe_recv(zmq_host, pipebuf, config.buffer_size);
	if (ret > 0) {
	  if (seq && (((struct ch_buf_hdr *)pipebuf)->seq != ((seq + 1) % MAX_SEQNUM))) {
	    Log(LOG_WARNING, "WARN ( %s/%s ): Missing data detected. Sequence received=%u expected=%u\n",
		config.name, config.type, ((struct ch_buf_hdr *)pipebuf)->seq, ((seq + 1) % MAX_SEQNUM));
	  }

	  seq = ((struct ch_buf_hdr *)pipebuf)->seq;
	}
	else goto poll_again;
      }
#endif

      /* lazy refresh time handling */ 
      if (idata.now > refresh_deadline) P_cache_handle_flush_event(&pt);

      data = (struct pkt_data *) (pipebuf+sizeof(struct ch_buf_hdr));

      if (config.debug_internal_msg) 
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): buffer received cpid=%u len=%llu seq=%u num_entries=%u\n",
                config.name, config.type, core_pid, ((struct ch_buf_hdr *)pipebuf)->len,
                seq, ((struct ch_buf_hdr *)pipebuf)->num);

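      /* When pipe_check_core_pid is set, skip buffers not stamped by our
         core process; otherwise walk every entry in the received buffer. */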
      if (!config.pipe_check_core_pid || ((struct ch_buf_hdr *)pipebuf)->core_pid == core_pid) {
      while (((struct ch_buf_hdr *)pipebuf)->num > 0) {
        for (num = 0; primptrs_funcs[num]; num++)
          (*primptrs_funcs[num])((u_char *)data, &extras, &prim_ptrs);

	for (num = 0; net_funcs[num]; num++)
	  (*net_funcs[num])(&nt, &nc, &data->primitives, prim_ptrs.pbgp, &nfd);

	if (config.ports_file) {
          if (!pt.table[data->primitives.src_port]) data->primitives.src_port = 0;
          if (!pt.table[data->primitives.dst_port]) data->primitives.dst_port = 0;
        }

        if (config.pkt_len_distrib_bins_str &&
            config.what_to_count_2 & COUNT_PKT_LEN_DISTRIB)
          evaluate_pkt_len_distrib(data);

        prim_ptrs.data = data;
        (*insert_func)(&prim_ptrs, &idata);

	((struct ch_buf_hdr *)pipebuf)->num--;
        if (((struct ch_buf_hdr *)pipebuf)->num) {
          dataptr = (unsigned char *) data;
          if (!prim_ptrs.vlen_next_off) dataptr += datasize;
          else dataptr += prim_ptrs.vlen_next_off;
          data = (struct pkt_data *) dataptr;
	}
      }
      }

      goto read_data;
    }
  }
}
Example #30
void tee_plugin(int pipe_fd, struct configuration *cfgptr, void *ptr)
{
  struct pkt_msg *msg;
  unsigned char *pipebuf;
  struct pollfd pfd;
  int timeout, err;
  int ret, num, fd, pool_idx, recv_idx;
  struct ring *rg = &((struct channels_list_entry *)ptr)->rg;
  struct ch_status *status = ((struct channels_list_entry *)ptr)->status;
  u_int32_t bufsz = ((struct channels_list_entry *)ptr)->bufsize;
  char *dataptr, dest_addr[256], dest_serv[256];
  struct tee_receiver *target = NULL;
  struct plugin_requests req;

  unsigned char *rgptr;
  int pollagain = TRUE;
  u_int32_t seq = 1, rg_err_count = 0;

  memcpy(&config, cfgptr, sizeof(struct configuration));
  recollect_pipe_memory(ptr);
  pm_setproctitle("%s [%s]", "Tee Plugin", config.name);
  if (config.pidfile) write_pid_file_plugin(config.pidfile, config.type, config.name);
  if (config.logfile) {
    fclose(config.logfile_fd);
    config.logfile_fd = open_logfile(config.logfile, "a");
  }

  if (config.proc_priority) {
    int ret;

    ret = setpriority(PRIO_PROCESS, 0, config.proc_priority);
    if (ret) Log(LOG_WARNING, "WARN ( %s/%s ): proc_priority failed (errno: %d)\n", config.name, config.type, errno);
    else Log(LOG_INFO, "INFO ( %s/%s ): proc_priority set to %d\n", config.name, config.type, getpriority(PRIO_PROCESS, 0));
  }

  /* signal handling */
  signal(SIGINT, Tee_exit_now);
  signal(SIGUSR1, SIG_IGN);
  signal(SIGUSR2, reload_maps); /* sets to true the reload_maps flag */
  signal(SIGPIPE, SIG_IGN);
  signal(SIGCHLD, SIG_IGN);

  if (config.tee_transparent && getuid() != 0) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Transparent mode requires super-user permissions. Exiting ...\n", config.name, config.type);
    exit_plugin(1);
  }

  if (config.nfprobe_receiver && config.tee_receivers) {
    Log(LOG_ERR, "ERROR ( %s/%s ): tee_receiver and tee_receivers are mutually exclusive. Exiting ...\n", config.name, config.type);
    exit_plugin(1);
  }
  else if (!config.nfprobe_receiver && !config.tee_receivers) {
    Log(LOG_ERR, "ERROR ( %s/%s ): No receivers specified: tee_receiver or tee_receivers is required. Exiting ...\n", config.name, config.type);
    exit_plugin(1);
  }

  memset(&receivers, 0, sizeof(receivers));
  memset(&req, 0, sizeof(req));
  reload_map = FALSE;

  /* Setting up pools */
  if (!config.tee_max_receiver_pools) config.tee_max_receiver_pools = MAX_TEE_POOLS;

  receivers.pools = malloc((config.tee_max_receiver_pools+1)*sizeof(struct tee_receivers_pool));
  if (!receivers.pools) {
    Log(LOG_ERR, "ERROR ( %s/%s ): unable to allocate receiver pools. Exiting ...\n", config.name, config.type);
    exit_plugin(1);
  }
  else memset(receivers.pools, 0, (config.tee_max_receiver_pools+1)*sizeof(struct tee_receivers_pool));

  /* Setting up receivers per pool */
  if (!config.tee_max_receivers) config.tee_max_receivers = MAX_TEE_RECEIVERS;

  for (pool_idx = 0; pool_idx < config.tee_max_receiver_pools; pool_idx++) {
    receivers.pools[pool_idx].receivers = malloc(config.tee_max_receivers*sizeof(struct tee_receivers));
    if (!receivers.pools[pool_idx].receivers) {
      Log(LOG_ERR, "ERROR ( %s/%s ): unable to allocate receivers for pool #%u. Exiting ...\n", config.name, config.type, pool_idx);
      exit_plugin(1);
    }
    else memset(receivers.pools[pool_idx].receivers, 0, config.tee_max_receivers*sizeof(struct tee_receivers));
  }

  if (config.nfprobe_receiver) {
    pool_idx = 0; recv_idx = 0;

    target = &receivers.pools[pool_idx].receivers[recv_idx];
    target->dest_len = sizeof(target->dest);
    if (Tee_parse_hostport(config.nfprobe_receiver, (struct sockaddr *) &target->dest, &target->dest_len)) {
      Log(LOG_ERR, "ERROR ( %s/%s ): Invalid receiver %s . ", config.name, config.type, config.nfprobe_receiver);
      exit_plugin(1);
    }
    else {
      recv_idx++; receivers.pools[pool_idx].num = recv_idx;
      pool_idx++; receivers.num = pool_idx;
    }
  }
  else if (config.tee_receivers) {
    int recvs_allocated = FALSE;

    req.key_value_table = (void *) &receivers;
    load_id_file(MAP_TEE_RECVS, config.tee_receivers, NULL, &req, &recvs_allocated);
  }

  config.sql_refresh_time = DEFAULT_TEE_REFRESH_TIME;
  timeout = config.sql_refresh_time*1000;

  pipebuf = (unsigned char *) Malloc(config.buffer_size);

  pfd.fd = pipe_fd;
  pfd.events = POLLIN;
  setnonblocking(pipe_fd);

  memset(pipebuf, 0, config.buffer_size);
  err_cant_bridge_af = 0;

  /* Arrange send socket */
  Tee_init_socks();

  /* plugin main loop */
  for (;;) {
    poll_again:
    status->wakeup = TRUE;
    ret = poll(&pfd, 1, timeout);
    if (ret < 0) goto poll_again;

    if (reload_map) {
      int recvs_allocated = FALSE;

      Tee_destroy_recvs();
      load_id_file(MAP_TEE_RECVS, config.tee_receivers, NULL, &req, &recvs_allocated);

      Tee_init_socks();
      reload_map = FALSE;
    }

    switch (ret) {
    case 0: /* timeout */
      /* reserved for future since we don't currently cache/batch/etc */
      break;
    default: /* we received data */
      read_data:
      if (!pollagain) {
        seq++;
        seq %= MAX_SEQNUM;
        if (seq == 0) rg_err_count = FALSE;
      }
      else {
        if ((ret = read(pipe_fd, &rgptr, sizeof(rgptr))) == 0)
          exit_plugin(1); /* we exit silently; something happened at the write end */
      }

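      /* Same ring handshake as the other plugins: wrap the read pointer
         and re-sync on the producer's sequence if we fell behind. */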
      if ((rg->ptr + bufsz) > rg->end) rg->ptr = rg->base;

      if (((struct ch_buf_hdr *)rg->ptr)->seq != seq) {
        if (!pollagain) {
          pollagain = TRUE;
          goto poll_again;
        }
        else {
          rg_err_count++;
          if (config.debug || (rg_err_count > MAX_RG_COUNT_ERR)) {
            Log(LOG_ERR, "ERROR ( %s/%s ): We are missing data.\n", config.name, config.type);
            Log(LOG_ERR, "If you see this message once in a while, discard it. Otherwise some solutions follow:\n");
            Log(LOG_ERR, "- increase shared memory size, 'plugin_pipe_size'; now: '%u'.\n", config.pipe_size);
            Log(LOG_ERR, "- increase buffer size, 'plugin_buffer_size'; now: '%u'.\n", config.buffer_size);
            Log(LOG_ERR, "- increase system maximum socket size.\n\n");
          }
          seq = ((struct ch_buf_hdr *)rg->ptr)->seq;
        }
      }

      pollagain = FALSE;
      memcpy(pipebuf, rg->ptr, bufsz);
      rg->ptr += bufsz;

      msg = (struct pkt_msg *) (pipebuf+sizeof(struct ch_buf_hdr));
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): buffer received seq=%u num_entries=%u\n", config.name, config.type, seq, ((struct ch_buf_hdr *)pipebuf)->num);

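      /* Replicate each message to every pool whose tag filter matches:
         either fan out to all receivers in the pool or let the configured
         balancing function pick a single target. */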
      while (((struct ch_buf_hdr *)pipebuf)->num > 0) {
	for (pool_idx = 0; pool_idx < receivers.num; pool_idx++) {
	  if (!evaluate_tags(&receivers.pools[pool_idx].tag_filter, msg->tag)) {
	    if (!receivers.pools[pool_idx].balance.func) {
	      for (recv_idx = 0; recv_idx < receivers.pools[pool_idx].num; recv_idx++) {
	        target = &receivers.pools[pool_idx].receivers[recv_idx];
	        Tee_send(msg, (struct sockaddr *) &target->dest, target->fd);
	      }
	    }
	    else {
	      target = receivers.pools[pool_idx].balance.func(&receivers.pools[pool_idx], msg);
	      Tee_send(msg, (struct sockaddr *) &target->dest, target->fd);
	    }
	  }
	}

        ((struct ch_buf_hdr *)pipebuf)->num--;
        if (((struct ch_buf_hdr *)pipebuf)->num) {
	  dataptr = (unsigned char *) msg;
          dataptr += PmsgSz;
	  msg = (struct pkt_msg *) dataptr;
	}
      }
      goto read_data;
    }
  }  
}