Example #1
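write_and_free_json_kafka() dumps a Jansson object to a string, produces it to Kafka, and releases the object; when round-robin topics are configured (topic_rr.max), the rotated topic is swapped in for the produce call and the original restored afterwards.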
int write_and_free_json_kafka(void *kafka_log, void *obj)
{
  char *orig_kafka_topic = NULL, dyn_kafka_topic[SRVBUFLEN];
  struct p_kafka_host *alog = (struct p_kafka_host *) kafka_log;
  int ret = ERR;

  char *tmpbuf = NULL;
  json_t *json_obj = (json_t *) obj;

  tmpbuf = json_dumps(json_obj, JSON_PRESERVE_ORDER);
  json_decref(json_obj); /* ownership is taken: drop the object whether or not the dump succeeded */

  if (tmpbuf) {
    if (alog->topic_rr.max) {
      orig_kafka_topic = p_kafka_get_topic(alog);
      P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &alog->topic_rr);
      p_kafka_set_topic(alog, dyn_kafka_topic);
    }

    ret = p_kafka_produce_data(alog, tmpbuf, strlen(tmpbuf));
    free(tmpbuf);

    if (alog->topic_rr.max) p_kafka_set_topic(alog, orig_kafka_topic);
  }

  return ret;
}
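
A minimal caller sketch for the function above (not part of the original source): it assumes a struct p_kafka_host already connected as in Example #3, Jansson available, and the surrounding project's declarations (struct p_kafka_host, ERR) in scope; emit_marker() and the "event_type" field are hypothetical.

#include <jansson.h>

/* Hypothetical helper: builds a one-field Jansson object and hands it to
 * write_and_free_json_kafka(), which takes ownership of the object
 * (json_decref() is called even when the dump fails). */
int emit_marker(struct p_kafka_host *host, const char *marker)
{
  json_t *obj = json_object();

  json_object_set_new_nocheck(obj, "event_type", json_string(marker));

  /* returns ERR on failure, else the p_kafka_produce_data() result */
  return write_and_free_json_kafka(host, obj);
}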
Example #2
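kafka_avro_schema_purge() publishes an Avro schema string to its own topic; with kafka_partition_dynamic set, it queries librdkafka for the topic's partition count and produces the schema to every partition.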
void kafka_avro_schema_purge(char *avro_schema_str)
{
  struct p_kafka_host kafka_avro_schema_host;
  int part, part_cnt, tpc;

  if (!avro_schema_str || !config.kafka_avro_schema_topic) return;

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  p_kafka_init_host(&kafka_avro_schema_host, config.kafka_config_file);
  p_kafka_connect_to_produce(&kafka_avro_schema_host);
  p_kafka_set_broker(&kafka_avro_schema_host, config.sql_host, config.kafka_broker_port);
  p_kafka_set_topic(&kafka_avro_schema_host, config.kafka_avro_schema_topic);
  p_kafka_set_partition(&kafka_avro_schema_host, config.kafka_partition);
  p_kafka_set_key(&kafka_avro_schema_host, config.kafka_partition_key, config.kafka_partition_keylen);
  p_kafka_set_content_type(&kafka_avro_schema_host, PM_KAFKA_CNT_TYPE_STR);

  if (config.kafka_partition_dynamic) {
    rd_kafka_resp_err_t err;
    const struct rd_kafka_metadata *metadata;

    err = rd_kafka_metadata(kafka_avro_schema_host.rk, 0, kafka_avro_schema_host.topic, &metadata, 100);
    if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
      Log(LOG_ERR, "ERROR ( %s/%s ): Unable to get Kafka metadata for topic %s: %s\n",
              config.name, config.type, kafka_avro_schema_host.topic, rd_kafka_err2str(err));
      exit_plugin(1);
    }

    part_cnt = -1;
    for (tpc = 0 ; tpc < metadata->topic_cnt ; tpc++) {
      const struct rd_kafka_metadata_topic *t = &metadata->topics[tpc];
      if (!strcmp(t->topic, config.kafka_avro_schema_topic)) {
          part_cnt = t->partition_cnt;
          break;
      }
    }

    for (part = 0; part < part_cnt; part++) {
      p_kafka_produce_data_to_part(&kafka_avro_schema_host, avro_schema_str, strlen(avro_schema_str), part);
    }

    rd_kafka_metadata_destroy(metadata); /* release the metadata acquired above */
  }
  else {
    p_kafka_produce_data(&kafka_avro_schema_host, avro_schema_str, strlen(avro_schema_str));
  }

  p_kafka_close(&kafka_avro_schema_host, FALSE);
}
Example #3
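A simpler variant of kafka_avro_schema_purge() that produces the schema once to the configured topic, without per-partition fan-out.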
void kafka_avro_schema_purge(char *avro_schema_str)
{
  struct p_kafka_host kafka_avro_schema_host;

  if (!avro_schema_str || !config.kafka_avro_schema_topic) return;

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  p_kafka_init_host(&kafka_avro_schema_host, config.kafka_config_file);
  p_kafka_connect_to_produce(&kafka_avro_schema_host);
  p_kafka_set_broker(&kafka_avro_schema_host, config.sql_host, config.kafka_broker_port);
  p_kafka_set_topic(&kafka_avro_schema_host, config.kafka_avro_schema_topic);
  p_kafka_set_partition(&kafka_avro_schema_host, config.kafka_partition);
  p_kafka_set_key(&kafka_avro_schema_host, config.kafka_partition_key, config.kafka_partition_keylen);
  p_kafka_set_content_type(&kafka_avro_schema_host, PM_KAFKA_CNT_TYPE_STR);

  p_kafka_produce_data(&kafka_avro_schema_host, avro_schema_str, strlen(avro_schema_str));

  p_kafka_close(&kafka_avro_schema_host, FALSE);
}
Example #4
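kafka_cache_purge() drains the plugin cache to Kafka, with JSON and Avro encodings, dynamic and round-robin topics, and multi-value batching.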
void kafka_cache_purge(struct chained_cache *queue[], int index, int safe_action)
{
  struct pkt_primitives *data = NULL;
  struct pkt_bgp_primitives *pbgp = NULL;
  struct pkt_nat_primitives *pnat = NULL;
  struct pkt_mpls_primitives *pmpls = NULL;
  struct pkt_tunnel_primitives *ptun = NULL;
  char *pcust = NULL;
  struct pkt_vlen_hdr_primitives *pvlen = NULL;
  struct pkt_bgp_primitives empty_pbgp;
  struct pkt_nat_primitives empty_pnat;
  struct pkt_mpls_primitives empty_pmpls;
  struct pkt_tunnel_primitives empty_ptun;
  char *empty_pcust = NULL;
  char src_mac[18], dst_mac[18], src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN], ip_address[INET6_ADDRSTRLEN];
  char rd_str[SRVBUFLEN], misc_str[SRVBUFLEN], dyn_kafka_topic[SRVBUFLEN], *orig_kafka_topic = NULL;
  int i, j, stop, batch_idx, is_topic_dyn = FALSE, qn = 0, ret, saved_index = index;
  int mv_num = 0, mv_num_save = 0;
  time_t start, duration;
  pid_t writer_pid = getpid();

  char *json_buf = NULL;
  int json_buf_off = 0;

#ifdef WITH_AVRO
  avro_writer_t avro_writer;
  char *avro_buf = NULL;
  int avro_buffer_full = FALSE;
#endif

  p_kafka_init_host(&kafkap_kafka_host, config.kafka_config_file);

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  if (!config.sql_table) config.sql_table = default_kafka_topic;
  else {
    if (strchr(config.sql_table, '$')) {
      is_topic_dyn = TRUE;
      orig_kafka_topic = config.sql_table;
    }
  }

  if (config.amqp_routing_key_rr) orig_kafka_topic = config.sql_table;

  p_kafka_init_topic_rr(&kafkap_kafka_host);
  p_kafka_set_topic_rr(&kafkap_kafka_host, config.amqp_routing_key_rr);

  empty_pcust = malloc(config.cpptrs.len);
  if (!empty_pcust) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() empty_pcust. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  memset(&empty_pbgp, 0, sizeof(struct pkt_bgp_primitives));
  memset(&empty_pnat, 0, sizeof(struct pkt_nat_primitives));
  memset(&empty_pmpls, 0, sizeof(struct pkt_mpls_primitives));
  memset(&empty_ptun, 0, sizeof(struct pkt_tunnel_primitives));
  memset(empty_pcust, 0, config.cpptrs.len);

  p_kafka_connect_to_produce(&kafkap_kafka_host);
  p_kafka_set_broker(&kafkap_kafka_host, config.sql_host, config.kafka_broker_port);
  if (!is_topic_dyn && !config.amqp_routing_key_rr) p_kafka_set_topic(&kafkap_kafka_host, config.sql_table);
  p_kafka_set_partition(&kafkap_kafka_host, config.kafka_partition);
  p_kafka_set_key(&kafkap_kafka_host, config.kafka_partition_key, config.kafka_partition_keylen);

  if (config.message_broker_output & PRINT_OUTPUT_JSON) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_STR);
  else if (config.message_broker_output & PRINT_OUTPUT_AVRO) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_BIN);
  else {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unsupported kafka_output value specified. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  for (j = 0, stop = 0; (!stop) && P_preprocess_funcs[j]; j++)
    stop = P_preprocess_funcs[j](queue, &index, j);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

  if (config.print_markers) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON || config.message_broker_output & PRINT_OUTPUT_AVRO) {
      void *json_obj;
      char *json_str = NULL; /* stays NULL if the marker object cannot be built */

      json_obj = compose_purge_init_json(config.name, writer_pid);

      if (json_obj) json_str = compose_json_str(json_obj);
      if (json_str) {
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

        free(json_str);
        json_str = NULL;
      }
    }
  }

  if (config.message_broker_output & PRINT_OUTPUT_JSON) {
    if (config.sql_multi_values) {
      json_buf = malloc(config.sql_multi_values);

      if (!json_buf) {
	Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (json_buf). Exiting ..\n", config.name, config.type);
	exit_plugin(1);
      }
      else memset(json_buf, 0, config.sql_multi_values);
    }
  }
  else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
    if (!config.avro_buffer_size) config.avro_buffer_size = LARGEBUFLEN;

    avro_buf = malloc(config.avro_buffer_size);

    if (!avro_buf) {
      Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (avro_buf). Exiting ..\n", config.name, config.type);
      exit_plugin(1);
    }
    else memset(avro_buf, 0, config.avro_buffer_size);

    avro_writer = avro_writer_memory(avro_buf, config.avro_buffer_size);
#endif
  }

  for (j = 0; j < index; j++) {
    char *json_str = NULL; /* set only by the JSON branch below */

    if (queue[j]->valid != PRINT_CACHE_COMMITTED) continue;

    data = &queue[j]->primitives;
    if (queue[j]->pbgp) pbgp = queue[j]->pbgp;
    else pbgp = &empty_pbgp;

    if (queue[j]->pnat) pnat = queue[j]->pnat;
    else pnat = &empty_pnat;

    if (queue[j]->pmpls) pmpls = queue[j]->pmpls;
    else pmpls = &empty_pmpls;

    if (queue[j]->ptun) ptun = queue[j]->ptun;
    else ptun = &empty_ptun;

    if (queue[j]->pcust) pcust = queue[j]->pcust;
    else pcust = empty_pcust;

    if (queue[j]->pvlen) pvlen = queue[j]->pvlen;
    else pvlen = NULL;

    if (queue[j]->valid == PRINT_CACHE_FREE) continue;

    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
#ifdef WITH_JANSSON
      json_t *json_obj = json_object();
      int idx;

      for (idx = 0; idx < N_PRIMITIVES && cjhandler[idx]; idx++) cjhandler[idx](json_obj, queue[j]);
      add_writer_name_and_pid_json(json_obj, config.name, writer_pid);

      json_str = compose_json_str(json_obj);
#endif
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      avro_value_iface_t *avro_iface = avro_generic_class_from_schema(avro_acct_schema);
      avro_value_t avro_value = compose_avro(config.what_to_count, config.what_to_count_2, queue[j]->flow_type,
                           &queue[j]->primitives, pbgp, pnat, pmpls, ptun, pcust, pvlen, queue[j]->bytes_counter,
                           queue[j]->packet_counter, queue[j]->flow_counter, queue[j]->tcp_flags,
                           &queue[j]->basetime, queue[j]->stitch, avro_iface);
      size_t avro_value_size;

      add_writer_name_and_pid_avro(avro_value, config.name, writer_pid);
      avro_value_sizeof(&avro_value, &avro_value_size);

      if (avro_value_size > config.avro_buffer_size) {
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: insufficient buffer size (avro_buffer_size=%u)\n",
            config.name, config.type, config.avro_buffer_size);
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: increase value or look for avro_buffer_size in CONFIG-KEYS document.\n\n",
            config.name, config.type);
        exit_plugin(1);
      }
      else if (avro_value_size >= (config.avro_buffer_size - avro_writer_tell(avro_writer))) {
        avro_buffer_full = TRUE;
        j--; /* value not written: replay this element after the buffer is flushed */
      }
      else if (avro_value_write(avro_writer, &avro_value)) {
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: unable to write value: %s\n",
            config.name, config.type, avro_strerror());
        exit_plugin(1);
      }
      else {
        mv_num++;
      }

      avro_value_decref(&avro_value);
      avro_value_iface_decref(avro_iface);
#else
      if (config.debug) Log(LOG_DEBUG, "DEBUG ( %s/%s ): compose_avro(): AVRO object not created due to missing --enable-avro\n", config.name, config.type);
#endif
    }

    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
      char *tmp_str = NULL;

      if (json_str && config.sql_multi_values) {
        int json_strlen = (strlen(json_str) ? (strlen(json_str) + 1) : 0);

	if (json_strlen >= (config.sql_multi_values - json_buf_off)) {
	  if (json_strlen >= config.sql_multi_values) {
	    Log(LOG_ERR, "ERROR ( %s/%s ): kafka_multi_values not large enough to store JSON elements. Exiting ..\n", config.name, config.type);
	    exit_plugin(1);
	  }

	  tmp_str = json_str;
	  json_str = json_buf;
	}
	else {
	  strcat(json_buf, json_str);
	  mv_num++;

	  string_add_newline(json_buf);
	  json_buf_off = strlen(json_buf);

	  free(json_str);
	  json_str = NULL;
	}
      }

      if (json_str) {
        if (is_topic_dyn) {
          P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        if (config.amqp_routing_key_rr) {
          P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

	if (config.sql_multi_values) {
	  /* the element that did not fit seeds the next batch */
	  json_str = tmp_str;
	  strcpy(json_buf, json_str);

	  mv_num_save = mv_num;
	  mv_num = 1;

          string_add_newline(json_buf);
          json_buf_off = strlen(json_buf);
        }

        free(json_str);
        json_str = NULL;

        if (!ret) {
          if (!config.sql_multi_values) qn++;
          else qn += mv_num_save;
        }
        else break;
      }
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      if (!config.sql_multi_values || (mv_num >= config.sql_multi_values) || avro_buffer_full) {
        if (is_topic_dyn) {
          P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        if (config.amqp_routing_key_rr) {
          P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer));
        avro_writer_reset(avro_writer);
        avro_buffer_full = FALSE;
        mv_num_save = mv_num;
        mv_num = 0;

        if (!ret) qn += mv_num_save;
        else break;
      }
#endif
    }
  }

  if (config.sql_multi_values) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
      if (json_buf && json_buf_off) {
	/* no handling of dyn routing keys here: not compatible */
	Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_buf);
	ret = p_kafka_produce_data(&kafkap_kafka_host, json_buf, strlen(json_buf));

	if (!ret) qn += mv_num;
      }
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      if (avro_writer_tell(avro_writer)) {
        ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer));

        if (!ret) qn += mv_num;
      }
#endif
    }
  }

  duration = time(NULL)-start;

  if (config.print_markers) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON || config.message_broker_output & PRINT_OUTPUT_AVRO) {
    void *json_obj;
    char *json_str = NULL;

      json_obj = compose_purge_close_json(config.name, writer_pid, qn, saved_index, duration);

      if (json_obj) json_str = compose_json_str(json_obj);
      if (json_str) {
	sleep(1); /* Let's give a small delay to facilitate purge_close being
		     the last message in batch in case of partitioned topics */
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

        free(json_str);
        json_str = NULL;
      }
    }
  }

  p_kafka_close(&kafkap_kafka_host, FALSE);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n",
		config.name, config.type, writer_pid, qn, saved_index, duration);

  if (config.sql_trigger_exec && !safe_action) P_trigger_exec(config.sql_trigger_exec); 

  if (empty_pcust) free(empty_pcust);

  if (json_buf) free(json_buf);

#ifdef WITH_AVRO
  if (avro_buf) {
    avro_writer_free(avro_writer); /* writer exists iff avro_buf was allocated; it does not own the buffer */
    free(avro_buf);
  }
#endif
}
Example #5
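An earlier, JSON-only kafka_cache_purge() that implements multi-value batching by accumulating elements in a Jansson array.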
void kafka_cache_purge(struct chained_cache *queue[], int index)
{
  struct pkt_primitives *data = NULL;
  struct pkt_bgp_primitives *pbgp = NULL;
  struct pkt_nat_primitives *pnat = NULL;
  struct pkt_mpls_primitives *pmpls = NULL;
  char *pcust = NULL;
  struct pkt_vlen_hdr_primitives *pvlen = NULL;
  struct pkt_bgp_primitives empty_pbgp;
  struct pkt_nat_primitives empty_pnat;
  struct pkt_mpls_primitives empty_pmpls;
  char *empty_pcust = NULL;
  char src_mac[18], dst_mac[18], src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN], ip_address[INET6_ADDRSTRLEN];
  char rd_str[SRVBUFLEN], misc_str[SRVBUFLEN], dyn_kafka_topic[SRVBUFLEN], *orig_kafka_topic = NULL;
  int i, j, stop, batch_idx, is_topic_dyn = FALSE, qn = 0, ret, saved_index = index;
  int mv_num = 0, mv_num_save = 0;
  time_t start, duration;
  pid_t writer_pid = getpid();

#ifdef WITH_JANSSON
  json_t *array = json_array();
#endif

  p_kafka_init_host(&kafkap_kafka_host);

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  if (!config.sql_table) config.sql_table = default_kafka_topic;
  else {
    if (strchr(config.sql_table, '$')) {
      is_topic_dyn = TRUE;
      orig_kafka_topic = config.sql_table;
      config.sql_table = dyn_kafka_topic;
    }
  }
  if (config.amqp_routing_key_rr) {
    orig_kafka_topic = config.sql_table;
    config.sql_table = dyn_kafka_topic;
  }

  p_kafka_init_topic_rr(&kafkap_kafka_host);
  p_kafka_set_topic_rr(&kafkap_kafka_host, config.amqp_routing_key_rr);

  empty_pcust = malloc(config.cpptrs.len);
  if (!empty_pcust) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() empty_pcust. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  memset(&empty_pbgp, 0, sizeof(struct pkt_bgp_primitives));
  memset(&empty_pnat, 0, sizeof(struct pkt_nat_primitives));
  memset(&empty_pmpls, 0, sizeof(struct pkt_mpls_primitives));
  memset(empty_pcust, 0, config.cpptrs.len);

  p_kafka_connect_to_produce(&kafkap_kafka_host);
  p_kafka_set_broker(&kafkap_kafka_host, config.sql_host, config.kafka_broker_port);
  p_kafka_set_topic(&kafkap_kafka_host, config.sql_table);
  p_kafka_set_partition(&kafkap_kafka_host, config.kafka_partition);
  p_kafka_set_key(&kafkap_kafka_host, config.kafka_partition_key, config.kafka_partition_keylen);
  p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_STR);

  for (j = 0, stop = 0; (!stop) && P_preprocess_funcs[j]; j++)
    stop = P_preprocess_funcs[j](queue, &index, j);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

  if (config.print_markers) {
    void *json_obj;
    char *json_str = NULL;

    json_obj = compose_purge_init_json(writer_pid);
    if (json_obj) json_str = compose_json_str(json_obj);
    if (json_str) {
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;
    }
  }

  for (j = 0; j < index; j++) {
    void *json_obj;
    char *json_str;

    if (queue[j]->valid != PRINT_CACHE_COMMITTED) continue;

    data = &queue[j]->primitives;
    if (queue[j]->pbgp) pbgp = queue[j]->pbgp;
    else pbgp = &empty_pbgp;

    if (queue[j]->pnat) pnat = queue[j]->pnat;
    else pnat = &empty_pnat;

    if (queue[j]->pmpls) pmpls = queue[j]->pmpls;
    else pmpls = &empty_pmpls;

    if (queue[j]->pcust) pcust = queue[j]->pcust;
    else pcust = empty_pcust;

    if (queue[j]->pvlen) pvlen = queue[j]->pvlen;
    else pvlen = NULL;

    if (queue[j]->valid == PRINT_CACHE_FREE) continue;

    json_obj = compose_json(config.what_to_count, config.what_to_count_2, queue[j]->flow_type,
                         &queue[j]->primitives, pbgp, pnat, pmpls, pcust, pvlen, queue[j]->bytes_counter,
			 queue[j]->packet_counter, queue[j]->flow_counter, queue[j]->tcp_flags,
			 &queue[j]->basetime, queue[j]->stitch);

    json_str = compose_json_str(json_obj);

#ifdef WITH_JANSSON
    if (json_str && config.sql_multi_values) {
      json_t *elem = NULL;
      char *tmp_str = json_str;

      if (json_array_size(array) >= config.sql_multi_values) {
        /* batch full: dump it for sending and start accumulating anew */
        json_str = json_dumps(array, 0);
        json_array_clear(array);
        mv_num_save = mv_num;
        mv_num = 0;
      }
      else json_str = NULL;

      elem = json_loads(tmp_str, 0, NULL);
      json_array_append_new(array, elem);
      mv_num++;

      /* the element has been copied into the array: release its string */
      free(tmp_str);
    }
#endif

    if (json_str) {
      if (is_topic_dyn) {
	P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
	p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
      }

      if (config.amqp_routing_key_rr) {
        P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
	p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
      }

      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;

      if (!ret) {
	if (!config.sql_multi_values) qn++;
	else qn += mv_num_save;
      }
      else break;
    }
  }

#ifdef WITH_JANSSON
  if (config.sql_multi_values && json_array_size(array)) {
    char *json_str;

    json_str = json_dumps(array, 0);
    json_array_clear(array);

    if (json_str) {
      /* no handling of dyn routing keys here: not compatible */
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;

      if (!ret) qn += mv_num;
    }
  }

  json_decref(array); /* drop the array even when no leftover batch was flushed */
#endif

  duration = time(NULL)-start;

  if (config.print_markers) {
    void *json_obj;
    char *json_str = NULL;

    json_obj = compose_purge_close_json(writer_pid, qn, saved_index, duration);
    if (json_obj) json_str = compose_json_str(json_obj);
    if (json_str) {
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;
    }
  }

  p_kafka_close(&kafkap_kafka_host, FALSE);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n",
		config.name, config.type, writer_pid, qn, saved_index, duration);

  if (config.sql_trigger_exec) P_trigger_exec(config.sql_trigger_exec); 

  if (empty_pcust) free(empty_pcust);
}
Example #6
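bgp_peer_log_msg() serializes a BGP route event (update, withdraw, delete, or dump) to JSON and writes it to the peer's log file, AMQP routing key and/or Kafka topic, as configured.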
int bgp_peer_log_msg(struct bgp_node *route, struct bgp_info *ri, afi_t afi, safi_t safi, char *event_type, int output, int log_type)
{
  struct bgp_misc_structs *bms;
  char log_rk[SRVBUFLEN];
  struct bgp_peer *peer;
  struct bgp_attr *attr;
  int ret = 0, amqp_ret = 0, kafka_ret = 0, etype = BGP_LOGDUMP_ET_NONE;
  pid_t writer_pid = getpid();

  if (!ri || !ri->peer || !ri->peer->log || !event_type) return ERR;

  peer = ri->peer;
  attr = ri->attr;

  bms = bgp_select_misc_db(peer->type);
  if (!bms) return ERR;

  if (!strcmp(event_type, "dump")) etype = BGP_LOGDUMP_ET_DUMP;
  else if (!strcmp(event_type, "log")) etype = BGP_LOGDUMP_ET_LOG;

#ifdef WITH_RABBITMQ
  if ((bms->msglog_amqp_routing_key && etype == BGP_LOGDUMP_ET_LOG) ||
      (bms->dump_amqp_routing_key && etype == BGP_LOGDUMP_ET_DUMP))
    p_amqp_set_routing_key(peer->log->amqp_host, peer->log->filename);
#endif

#ifdef WITH_KAFKA
  if ((bms->msglog_kafka_topic && etype == BGP_LOGDUMP_ET_LOG) ||
      (bms->dump_kafka_topic && etype == BGP_LOGDUMP_ET_DUMP))
    p_kafka_set_topic(peer->log->kafka_host, peer->log->filename);
#endif

  if (output == PRINT_OUTPUT_JSON) {
#ifdef WITH_JANSSON
    char ip_address[INET6_ADDRSTRLEN];
    json_t *obj = json_object();

    char empty[] = "";
    char prefix_str[INET6_ADDRSTRLEN], nexthop_str[INET6_ADDRSTRLEN];
    char *aspath;

    /* no need for seq for "dump" event_type */
    if (etype == BGP_LOGDUMP_ET_LOG) {
      json_object_set_new_nocheck(obj, "seq", json_integer((json_int_t)bms->log_seq));
      bgp_peer_log_seq_increment(&bms->log_seq);

      switch (log_type) {
      case BGP_LOG_TYPE_UPDATE:
	json_object_set_new_nocheck(obj, "log_type", json_string("update"));
	break;
      case BGP_LOG_TYPE_WITHDRAW:
	json_object_set_new_nocheck(obj, "log_type", json_string("withdraw"));
	break;
      case BGP_LOG_TYPE_DELETE:
	json_object_set_new_nocheck(obj, "log_type", json_string("delete"));
	break;
      default:
        json_object_set_new_nocheck(obj, "log_type", json_integer((json_int_t)log_type));
	break;
      }
    }

    if (etype == BGP_LOGDUMP_ET_LOG)
      json_object_set_new_nocheck(obj, "timestamp", json_string(bms->log_tstamp_str));
    else if (etype == BGP_LOGDUMP_ET_DUMP)
      json_object_set_new_nocheck(obj, "timestamp", json_string(bms->dump.tstamp_str));

    if (bms->bgp_peer_log_msg_extras) bms->bgp_peer_log_msg_extras(peer, output, obj);

    if (ri && ri->extra && ri->extra->bmed.id && bms->bgp_peer_logdump_extra_data)
      bms->bgp_peer_logdump_extra_data(&ri->extra->bmed, output, obj);

    addr_to_str(ip_address, &peer->addr);
    json_object_set_new_nocheck(obj, bms->peer_str, json_string(ip_address));

    json_object_set_new_nocheck(obj, "event_type", json_string(event_type));

    json_object_set_new_nocheck(obj, "afi", json_integer((json_int_t)afi));

    json_object_set_new_nocheck(obj, "safi", json_integer((json_int_t)safi));

    if (route) {
      memset(prefix_str, 0, INET6_ADDRSTRLEN);
      prefix2str(&route->p, prefix_str, INET6_ADDRSTRLEN);
      json_object_set_new_nocheck(obj, "ip_prefix", json_string(prefix_str));
    }

    if (ri && ri->extra && ri->extra->path_id)
      json_object_set_new_nocheck(obj, "as_path_id", json_integer((json_int_t)ri->extra->path_id));

    if (attr) {
      memset(nexthop_str, 0, INET6_ADDRSTRLEN);
      if (attr->mp_nexthop.family) addr_to_str(nexthop_str, &attr->mp_nexthop);
      else inet_ntop(AF_INET, &attr->nexthop, nexthop_str, INET6_ADDRSTRLEN);
      json_object_set_new_nocheck(obj, "bgp_nexthop", json_string(nexthop_str));

      aspath = attr->aspath ? attr->aspath->str : empty;
      json_object_set_new_nocheck(obj, "as_path", json_string(aspath));

      if (attr->community)
	json_object_set_new_nocheck(obj, "comms", json_string(attr->community->str));

      if (attr->ecommunity)
	json_object_set_new_nocheck(obj, "ecomms", json_string(attr->ecommunity->str));

      if (attr->lcommunity)
	json_object_set_new_nocheck(obj, "lcomms", json_string(attr->lcommunity->str));

      json_object_set_new_nocheck(obj, "origin", json_integer((json_int_t)attr->origin));

      json_object_set_new_nocheck(obj, "local_pref", json_integer((json_int_t)attr->local_pref));

      if (attr->med)
	json_object_set_new_nocheck(obj, "med", json_integer((json_int_t)attr->med));
    }

    if (safi == SAFI_MPLS_LABEL || safi == SAFI_MPLS_VPN) {
      u_char label_str[SHORTSHORTBUFLEN];

      if (safi == SAFI_MPLS_VPN) {
        u_char rd_str[SHORTSHORTBUFLEN];

        bgp_rd2str(rd_str, &ri->extra->rd);
	json_object_set_new_nocheck(obj, "rd", json_string(rd_str));
      }

      bgp_label2str(label_str, ri->extra->label);
      json_object_set_new_nocheck(obj, "label", json_string(label_str));
    }

    if ((bms->msglog_file && etype == BGP_LOGDUMP_ET_LOG) ||
	(bms->dump_file && etype == BGP_LOGDUMP_ET_DUMP))
      write_and_free_json(peer->log->fd, obj);

#ifdef WITH_RABBITMQ
    if ((bms->msglog_amqp_routing_key && etype == BGP_LOGDUMP_ET_LOG) ||
	(bms->dump_amqp_routing_key && etype == BGP_LOGDUMP_ET_DUMP)) {
      add_writer_name_and_pid_json(obj, config.proc_name, writer_pid);
      amqp_ret = write_and_free_json_amqp(peer->log->amqp_host, obj);
      p_amqp_unset_routing_key(peer->log->amqp_host);
    }
#endif

#ifdef WITH_KAFKA
    if ((bms->msglog_kafka_topic && etype == BGP_LOGDUMP_ET_LOG) ||
        (bms->dump_kafka_topic && etype == BGP_LOGDUMP_ET_DUMP)) {
      add_writer_name_and_pid_json(obj, config.proc_name, writer_pid);
      kafka_ret = write_and_free_json_kafka(peer->log->kafka_host, obj);
      p_kafka_unset_topic(peer->log->kafka_host);
    }
#endif /* WITH_KAFKA */
#endif /* WITH_JANSSON */
  }

  return (ret | amqp_ret | kafka_ret);
}
Example #7
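bgp_peer_log_close() emits a closing "log_close" record, drops the peer's reference on its shared log slot, and closes the file once the refcount reaches zero.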
int bgp_peer_log_close(struct bgp_peer *peer, int output, int type)
{
  struct bgp_misc_structs *bms = bgp_select_misc_db(type);
  char event_type[] = "log_close";
  struct bgp_peer_log *log_ptr;
  void *amqp_log_ptr, *kafka_log_ptr;
  int ret = 0, amqp_ret = 0, kafka_ret = 0;
  pid_t writer_pid = getpid();

  if (!bms || !peer || !peer->log) return ERR;

#ifdef WITH_RABBITMQ
  if (bms->msglog_amqp_routing_key)
    p_amqp_set_routing_key(peer->log->amqp_host, peer->log->filename);
#endif

#ifdef WITH_KAFKA
  if (bms->msglog_kafka_topic)
    p_kafka_set_topic(peer->log->kafka_host, peer->log->filename);
#endif

  log_ptr = peer->log;
  amqp_log_ptr = peer->log->amqp_host;
  kafka_log_ptr = peer->log->kafka_host;

  assert(peer->log->refcnt);
  peer->log->refcnt--;
  peer->log = NULL;

  if (output == PRINT_OUTPUT_JSON) {
#ifdef WITH_JANSSON
    char ip_address[INET6_ADDRSTRLEN];
    json_t *obj = json_object();

    json_object_set_new_nocheck(obj, "seq", json_integer((json_int_t)bms->log_seq));
    bgp_peer_log_seq_increment(&bms->log_seq);

    json_object_set_new_nocheck(obj, "timestamp", json_string(bms->log_tstamp_str));

    if (bms->bgp_peer_logdump_initclose_extras)
      bms->bgp_peer_logdump_initclose_extras(peer, output, obj);

    addr_to_str(ip_address, &peer->addr);
    json_object_set_new_nocheck(obj, bms->peer_str, json_string(ip_address));

    json_object_set_new_nocheck(obj, "event_type", json_string(event_type));

    if (bms->msglog_file)
      write_and_free_json(log_ptr->fd, obj);

#ifdef WITH_RABBITMQ
    if (bms->msglog_amqp_routing_key) {
      add_writer_name_and_pid_json(obj, config.proc_name, writer_pid);
      amqp_ret = write_and_free_json_amqp(amqp_log_ptr, obj);
      p_amqp_unset_routing_key(amqp_log_ptr);
    }
#endif

#ifdef WITH_KAFKA
    if (bms->msglog_kafka_topic) {
      add_writer_name_and_pid_json(obj, config.proc_name, writer_pid);
      kafka_ret = write_and_free_json_kafka(kafka_log_ptr, obj);
      p_kafka_unset_topic(kafka_log_ptr);
    }
#endif /* WITH_KAFKA */
#endif /* WITH_JANSSON */
  }

  /* last reference gone: close the underlying file and recycle the slot */
  if (!log_ptr->refcnt && bms->msglog_file) {
    fclose(log_ptr->fd);
    memset(log_ptr, 0, sizeof(struct bgp_peer_log));
  }

  return (ret | amqp_ret | kafka_ret);
}
Example #8
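bgp_peer_log_init() binds a peer to a new or shared log slot, wires up the AMQP/Kafka hosts and round-robin state, and emits an opening "log_init" record.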
int bgp_peer_log_init(struct bgp_peer *peer, int output, int type)
{
  struct bgp_misc_structs *bms = bgp_select_misc_db(type);
  int peer_idx, have_it, ret = 0, amqp_ret = 0, kafka_ret = 0;
  char log_filename[SRVBUFLEN], event_type[] = "log_init";
  pid_t writer_pid = getpid();

  if (!bms || !peer) return ERR;

  if (bms->msglog_file)
    bgp_peer_log_dynname(log_filename, SRVBUFLEN, bms->msglog_file, peer); 

  if (bms->msglog_amqp_routing_key) {
    bgp_peer_log_dynname(log_filename, SRVBUFLEN, bms->msglog_amqp_routing_key, peer); 
  }

  if (bms->msglog_kafka_topic) {
    bgp_peer_log_dynname(log_filename, SRVBUFLEN, bms->msglog_kafka_topic, peer); 
  }

  for (peer_idx = 0, have_it = 0; peer_idx < bms->max_peers; peer_idx++) {
    if (!bms->peers_log[peer_idx].refcnt) {
      if (bms->msglog_file) {
	bms->peers_log[peer_idx].fd = open_output_file(log_filename, "a", FALSE);
	setlinebuf(bms->peers_log[peer_idx].fd);
      }

#ifdef WITH_RABBITMQ
      if (bms->msglog_amqp_routing_key)
        bms->peers_log[peer_idx].amqp_host = bms->msglog_amqp_host;
#endif

#ifdef WITH_KAFKA
      if (bms->msglog_kafka_topic)
        bms->peers_log[peer_idx].kafka_host = bms->msglog_kafka_host;
#endif
      
      strcpy(bms->peers_log[peer_idx].filename, log_filename);
      have_it = TRUE;
      break;
    }
    else if (!strcmp(log_filename, bms->peers_log[peer_idx].filename)) {
      have_it = TRUE;
      break;
    }
  }

  if (have_it) {
    peer->log = &bms->peers_log[peer_idx];
    bms->peers_log[peer_idx].refcnt++;

#ifdef WITH_RABBITMQ
    if (bms->msglog_amqp_routing_key)
      p_amqp_set_routing_key(peer->log->amqp_host, peer->log->filename);

    if (bms->msglog_amqp_routing_key_rr && !p_amqp_get_routing_key_rr(peer->log->amqp_host)) {
      p_amqp_init_routing_key_rr(peer->log->amqp_host);
      p_amqp_set_routing_key_rr(peer->log->amqp_host, bms->msglog_amqp_routing_key_rr);
    }
#endif

#ifdef WITH_KAFKA
    if (bms->msglog_kafka_topic)
      p_kafka_set_topic(peer->log->kafka_host, peer->log->filename);

    if (bms->msglog_kafka_topic_rr && !p_kafka_get_topic_rr(peer->log->kafka_host)) {
      p_kafka_init_topic_rr(peer->log->kafka_host);
      p_kafka_set_topic_rr(peer->log->kafka_host, bms->msglog_kafka_topic_rr);
    }
#endif

    if (output == PRINT_OUTPUT_JSON) {
#ifdef WITH_JANSSON
      char ip_address[INET6_ADDRSTRLEN];
      json_t *obj = json_object();

      json_object_set_new_nocheck(obj, "seq", json_integer((json_int_t)bms->log_seq));
      bgp_peer_log_seq_increment(&bms->log_seq);

      json_object_set_new_nocheck(obj, "timestamp", json_string(bms->log_tstamp_str));

      if (bms->bgp_peer_logdump_initclose_extras)
	bms->bgp_peer_logdump_initclose_extras(peer, output, obj);

      addr_to_str(ip_address, &peer->addr);
      json_object_set_new_nocheck(obj, bms->peer_str, json_string(ip_address));

      json_object_set_new_nocheck(obj, "event_type", json_string(event_type));

      if (bms->bgp_peer_log_msg_extras) bms->bgp_peer_log_msg_extras(peer, output, obj);

      if (bms->msglog_file)
	write_and_free_json(peer->log->fd, obj);

#ifdef WITH_RABBITMQ
      if (bms->msglog_amqp_routing_key) {
	add_writer_name_and_pid_json(obj, config.proc_name, writer_pid);
	amqp_ret = write_and_free_json_amqp(peer->log->amqp_host, obj); 
	p_amqp_unset_routing_key(peer->log->amqp_host);
      }
#endif

#ifdef WITH_KAFKA
      if (bms->msglog_kafka_topic) {
	add_writer_name_and_pid_json(obj, config.proc_name, writer_pid);
        kafka_ret = write_and_free_json_kafka(peer->log->kafka_host, obj);
        p_kafka_unset_topic(peer->log->kafka_host);
      }
#endif /* WITH_KAFKA */
#endif /* WITH_JANSSON */
    }
  }

  return (ret | amqp_ret | kafka_ret);
}