Code example #1
File: mysql_plugin.c Project: Benocs/pmacct
void MY_cache_purge(struct db_cache *queue[], int index, struct insert_data *idata)
{
  struct db_cache *LastElemCommitted = NULL;
  struct logfile lf;
  time_t start;
  int j, stop, ret, go_to_pending, saved_index = index;
  char orig_insert_clause[LONGSRVBUFLEN], orig_update_clause[LONGSRVBUFLEN], orig_lock_clause[LONGSRVBUFLEN];
  char tmpbuf[LONGLONGSRVBUFLEN], tmptable[SRVBUFLEN];
  struct primitives_ptrs prim_ptrs;
  struct pkt_data dummy_data;
  pid_t writer_pid = getpid();

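  /* nothing to purge: still emit the start/end log lines, then return */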
  if (!index) {
    Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
    Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: 0/0, ET: 0) ***\n", config.name, config.type, writer_pid);
    return;
  }

  bed.lf = &lf;
  memset(&lf, 0, sizeof(struct logfile));
  memset(&prim_ptrs, 0, sizeof(prim_ptrs));
  memset(&dummy_data, 0, sizeof(dummy_data));

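  /* run the configured SQL preprocessing handlers; each may trim or invalidate queue entries */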
  for (j = 0, stop = 0; (!stop) && sql_preprocess_funcs[j]; j++)
    stop = sql_preprocess_funcs[j](queue, &index, j); 
  if (config.what_to_count & COUNT_CLASS)
    sql_invalidate_shadow_entries(queue, &index);
  idata->ten = index;

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

  /* re-using pending queries queue stuff from parent and saving clauses */
  memcpy(pending_queries_queue, queue, index*sizeof(struct db_cache *));
  pqq_ptr = index;

  strlcpy(orig_insert_clause, insert_clause, LONGSRVBUFLEN);
  strlcpy(orig_update_clause, update_clause, LONGSRVBUFLEN);
  strlcpy(orig_lock_clause, lock_clause, LONGSRVBUFLEN);

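  /* each pass reloads the working queue from pending_queries_queue; entries that belong to a
     different dynamic table get re-queued below and are handled in a later pass */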
  start:
  memset(&idata->mv, 0, sizeof(struct multi_values));
  memcpy(queue, pending_queries_queue, pqq_ptr*sizeof(struct db_cache *));
  memset(pending_queries_queue, 0, pqq_ptr*sizeof(struct db_cache *));
  index = pqq_ptr; pqq_ptr = 0;

  /* We check for variable substitution in SQL table */ 
  if (idata->dyn_table) {
    time_t stamp = 0;

    memset(tmpbuf, 0, LONGLONGSRVBUFLEN);
    stamp = queue[0]->basetime;
    strlcpy(idata->dyn_table_name, config.sql_table, SRVBUFLEN);
    strlcpy(insert_clause, orig_insert_clause, LONGSRVBUFLEN);
    strlcpy(update_clause, orig_update_clause, LONGSRVBUFLEN);
    strlcpy(lock_clause, orig_lock_clause, LONGSRVBUFLEN);

    prim_ptrs.data = &dummy_data;
    primptrs_set_all_from_db_cache(&prim_ptrs, queue[0]);

    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, insert_clause, &prim_ptrs);
    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, update_clause, &prim_ptrs);
    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, lock_clause, &prim_ptrs);
    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, idata->dyn_table_name, &prim_ptrs);

    strftime_same(insert_clause, LONGSRVBUFLEN, tmpbuf, &stamp);
    strftime_same(update_clause, LONGSRVBUFLEN, tmpbuf, &stamp);
    strftime_same(lock_clause, LONGSRVBUFLEN, tmpbuf, &stamp);
    strftime_same(idata->dyn_table_name, LONGSRVBUFLEN, tmpbuf, &stamp);
    if (config.sql_table_schema) sql_create_table(bed.p, &stamp, &prim_ptrs);
  }

  if (idata->locks == PM_LOCK_EXCLUSIVE) (*sqlfunc_cbr.lock)(bed.p); 

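  /* issue one query per valid cache entry; entries bound to another dynamic table are
     deferred to pending_queries_queue */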
  for (idata->current_queue_elem = 0; idata->current_queue_elem < index; idata->current_queue_elem++) {
    go_to_pending = FALSE;

    if (idata->dyn_table) {
      time_t stamp = 0;

      memset(tmpbuf, 0, LONGLONGSRVBUFLEN); // XXX: pedantic?
      stamp = queue[idata->current_queue_elem]->basetime;
      strlcpy(tmptable, config.sql_table, SRVBUFLEN);

      prim_ptrs.data = &dummy_data;
      primptrs_set_all_from_db_cache(&prim_ptrs, queue[idata->current_queue_elem]);
      handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, tmptable, &prim_ptrs);
      strftime_same(tmptable, LONGSRVBUFLEN, tmpbuf, &stamp);

      if (strncmp(idata->dyn_table_name, tmptable, SRVBUFLEN)) {
        pending_queries_queue[pqq_ptr] = queue[idata->current_queue_elem];
        pqq_ptr++;
        go_to_pending = TRUE;
      }
    }

    if (!go_to_pending) {
      if (queue[idata->current_queue_elem]->valid)
        sql_query(&bed, queue[idata->current_queue_elem], idata);
      if (queue[idata->current_queue_elem]->valid == SQL_CACHE_COMMITTED)
        LastElemCommitted = queue[idata->current_queue_elem];
    }
  }

  /* multi-value INSERT query: wrap-up */
  if (idata->mv.buffer_elem_num) {
    idata->mv.last_queue_elem = TRUE;
    sql_query(&bed, LastElemCommitted, idata);
    idata->qn--; /* increased by sql_query() one time too much */
  }

  /* rewinding stuff */
  if (idata->locks == PM_LOCK_EXCLUSIVE) (*sqlfunc_cbr.unlock)(&bed);
  if ((lf.fail) || (b.fail)) Log(LOG_ALERT, "ALERT ( %s/%s ): recovery for MySQL daemon failed.\n", config.name, config.type);

  /* If we have pending queries then start again */
  if (pqq_ptr) goto start;
  
  idata->elap_time = time(NULL)-start; 
  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n", 
		config.name, config.type, writer_pid, idata->qn, saved_index, idata->elap_time); 

  if (config.sql_trigger_exec) {
    if (!config.debug) idata->elap_time = time(NULL)-start;
    SQL_SetENV_child(idata);
  }
}
Code example #2
File: pgsql_plugin.c Project: WuHan0608/pmacct
void PG_cache_purge(struct db_cache *queue[], int index, struct insert_data *idata)
{
  PGresult *ret;
  struct logfile lf;
  struct db_cache **reprocess_queries_queue, **bulk_reprocess_queries_queue;
  char orig_insert_clause[LONGSRVBUFLEN], orig_update_clause[LONGSRVBUFLEN], orig_lock_clause[LONGSRVBUFLEN];
  char orig_copy_clause[LONGSRVBUFLEN], tmpbuf[LONGLONGSRVBUFLEN], tmptable[SRVBUFLEN];
  time_t start;
  int j, r, reprocess = 0, stop, go_to_pending, reprocess_idx, bulk_reprocess_idx, saved_index = index;
  struct primitives_ptrs prim_ptrs;
  struct pkt_data dummy_data;
  pid_t writer_pid = getpid();

  if (!index) {
    Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
    Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: 0/0, ET: 0) ***\n", config.name, config.type, writer_pid);
    return;
  }

  bed.lf = &lf;
  memset(&lf, 0, sizeof(struct logfile));
  memset(&prim_ptrs, 0, sizeof(prim_ptrs));
  memset(&dummy_data, 0, sizeof(dummy_data));

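  /* scratch queues used to replay queries after a failed statement or a failed final COMMIT */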
  reprocess_queries_queue = (struct db_cache **) malloc(qq_size*sizeof(struct db_cache *));
  bulk_reprocess_queries_queue = (struct db_cache **) malloc(qq_size*sizeof(struct db_cache *));
  if (!reprocess_queries_queue || !bulk_reprocess_queries_queue) {
    Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (reprocess_queries_queue). Exiting ..\n", config.name, config.type);
    exit_plugin(1);
  }

  for (j = 0, stop = 0; (!stop) && sql_preprocess_funcs[j]; j++) 
    stop = sql_preprocess_funcs[j](queue, &index, j);
  if (config.what_to_count & COUNT_CLASS)
    sql_invalidate_shadow_entries(queue, &index);
  idata->ten = index;

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

  /* re-using pending queries queue stuff from parent and saving clauses */
  memcpy(pending_queries_queue, queue, index*sizeof(struct db_cache *));
  pqq_ptr = index;

  strlcpy(orig_copy_clause, copy_clause, LONGSRVBUFLEN);
  strlcpy(orig_insert_clause, insert_clause, LONGSRVBUFLEN);
  strlcpy(orig_update_clause, update_clause, LONGSRVBUFLEN);
  strlcpy(orig_lock_clause, lock_clause, LONGSRVBUFLEN);

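  /* each pass reloads the working queue from pending_queries_queue; entries bound to a
     different dynamic table get re-queued below and are handled in a later pass */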
  start:
  memcpy(queue, pending_queries_queue, pqq_ptr*sizeof(struct db_cache *));
  memset(pending_queries_queue, 0, pqq_ptr*sizeof(struct db_cache *));
  index = pqq_ptr; pqq_ptr = 0;

  /* We check for variable substitution in SQL table */
  if (idata->dyn_table) {
    time_t stamp = 0;

    memset(tmpbuf, 0, LONGLONGSRVBUFLEN);
    stamp = queue[0]->basetime;

    prim_ptrs.data = &dummy_data;
    primptrs_set_all_from_db_cache(&prim_ptrs, queue[0]);

    strlcpy(idata->dyn_table_name, config.sql_table, SRVBUFLEN);
    strlcpy(insert_clause, orig_insert_clause, LONGSRVBUFLEN);
    strlcpy(update_clause, orig_update_clause, LONGSRVBUFLEN);
    strlcpy(lock_clause, orig_lock_clause, LONGSRVBUFLEN);

    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, copy_clause, &prim_ptrs);
    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, insert_clause, &prim_ptrs);
    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, update_clause, &prim_ptrs);
    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, lock_clause, &prim_ptrs);
    handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, idata->dyn_table_name, &prim_ptrs);

    strftime_same(copy_clause, LONGSRVBUFLEN, tmpbuf, &stamp);
    strftime_same(insert_clause, LONGSRVBUFLEN, tmpbuf, &stamp);
    strftime_same(update_clause, LONGSRVBUFLEN, tmpbuf, &stamp);
    strftime_same(lock_clause, LONGSRVBUFLEN, tmpbuf, &stamp);
    strftime_same(idata->dyn_table_name, LONGSRVBUFLEN, tmpbuf, &stamp);

    if (config.sql_table_schema) sql_create_table(bed.p, &stamp, &prim_ptrs); 
  }

  /* beginning DB transaction */
  (*sqlfunc_cbr.lock)(bed.p);

  /* for each element of the queue to be processed we execute sql_query(); the function
     returns a non-zero value if DB has failed; then we use reprocess_queries_queue and
     bulk_reprocess_queries_queue to handle reprocessing of specific elements or bulk
     queue (of elements not being held in a pending_queries_queue) due to final COMMIT
     failure */

  memset(reprocess_queries_queue, 0, qq_size*sizeof(struct db_cache *));
  memset(bulk_reprocess_queries_queue, 0, qq_size*sizeof(struct db_cache *));
  reprocess_idx = 0; bulk_reprocess_idx = 0;

  for (j = 0; j < index; j++) {
    go_to_pending = FALSE;

    if (idata->dyn_table) {
      time_t stamp = 0;

      memset(tmpbuf, 0, LONGLONGSRVBUFLEN); // XXX: pedantic?
      stamp = queue[j]->basetime;
      strlcpy(tmptable, config.sql_table, SRVBUFLEN);

      prim_ptrs.data = &dummy_data;
      primptrs_set_all_from_db_cache(&prim_ptrs, queue[j]);
      handle_dynname_internal_strings_same(tmpbuf, LONGSRVBUFLEN, tmptable, &prim_ptrs);
      strftime_same(tmptable, LONGSRVBUFLEN, tmpbuf, &stamp);

      if (strncmp(idata->dyn_table_name, tmptable, SRVBUFLEN)) {
        pending_queries_queue[pqq_ptr] = queue[j];

        pqq_ptr++;
        go_to_pending = TRUE;
      }
    }

    if (!go_to_pending) { 
      if (queue[j]->valid) {
	r = sql_query(&bed, queue[j], idata);

	/* note down all elements in case of a reprocess due to COMMIT failure */
	bulk_reprocess_queries_queue[bulk_reprocess_idx] = queue[j];
	bulk_reprocess_idx++;
      }
      else r = FALSE; /* not valid elements are marked as not to be reprocessed */ 
      if (r) {
        reprocess_queries_queue[reprocess_idx] = queue[j];
        reprocess_idx++;

	if (!reprocess) sql_db_fail(&p);
        reprocess = REPROCESS_SPECIFIC;
      }
    }
  }

  /* Finalizing DB transaction */
  if (!p.fail) {
    if (config.sql_use_copy) {
      if (PQputCopyEnd(p.desc, NULL) < 0) Log(LOG_ERR, "ERROR ( %s/%s ): COPY failed!\n\n", config.name, config.type); 
    }

    ret = PQexec(p.desc, "COMMIT");
    if (PQresultStatus(ret) != PGRES_COMMAND_OK) {
      if (!reprocess) sql_db_fail(&p);
      reprocess = REPROCESS_BULK;
    }
    PQclear(ret);
  }

  /* don't reprocess free (SQL_CACHE_FREE) and already recovered (SQL_CACHE_ERROR) elements */
  if (p.fail) {
    if (reprocess == REPROCESS_SPECIFIC) {
      for (j = 0; j < reprocess_idx; j++) {
        if (reprocess_queries_queue[j]->valid == SQL_CACHE_COMMITTED) sql_query(&bed, reprocess_queries_queue[j], idata);
      }
    }
    else if (reprocess == REPROCESS_BULK) {
      for (j = 0; j < bulk_reprocess_idx; j++) {
        if (bulk_reprocess_queries_queue[j]->valid == SQL_CACHE_COMMITTED) sql_query(&bed, bulk_reprocess_queries_queue[j], idata);
      }
    }
  }

  if (b.connected) {
    if (config.sql_use_copy) {
      if (PQputCopyEnd(b.desc, NULL) < 0) Log(LOG_ERR, "ERROR ( %s/%s ): COPY failed!\n\n", config.name, config.type);
    }
    ret = PQexec(b.desc, "COMMIT");
    if (PQresultStatus(ret) != PGRES_COMMAND_OK) sql_db_fail(&b);
    PQclear(ret);
  }

  /* rewinding stuff */
  if (lf.file) PG_file_close(&lf);
  if (lf.fail || b.fail) Log(LOG_ALERT, "ALERT ( %s/%s ): recovery for PgSQL operation failed.\n", config.name, config.type);

  /* If we have pending queries then start again */
  if (pqq_ptr) goto start;

  idata->elap_time = time(NULL)-start;
  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n",
		config.name, config.type, writer_pid, idata->qn, saved_index, idata->elap_time);

  if (config.sql_trigger_exec) {
    if (!config.debug) idata->elap_time = time(NULL)-start;
    SQL_SetENV_child(idata);
  }
}
Code example #3
File: kafka_plugin.c Project: jrossi/pmacct-1
void kafka_cache_purge(struct chained_cache *queue[], int index, int safe_action)
{
  struct pkt_primitives *data = NULL;
  struct pkt_bgp_primitives *pbgp = NULL;
  struct pkt_nat_primitives *pnat = NULL;
  struct pkt_mpls_primitives *pmpls = NULL;
  struct pkt_tunnel_primitives *ptun = NULL;
  char *pcust = NULL;
  struct pkt_vlen_hdr_primitives *pvlen = NULL;
  struct pkt_bgp_primitives empty_pbgp;
  struct pkt_nat_primitives empty_pnat;
  struct pkt_mpls_primitives empty_pmpls;
  struct pkt_tunnel_primitives empty_ptun;
  char *empty_pcust = NULL;
  char src_mac[18], dst_mac[18], src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN], ip_address[INET6_ADDRSTRLEN];
  char rd_str[SRVBUFLEN], misc_str[SRVBUFLEN], dyn_kafka_topic[SRVBUFLEN], *orig_kafka_topic = NULL;
  char elem_part_key[SRVBUFLEN], tmpbuf[SRVBUFLEN];
  int i, j, stop, batch_idx, is_topic_dyn = FALSE, qn = 0, ret, saved_index = index;
  int mv_num = 0, mv_num_save = 0;
  time_t start, duration;
  struct primitives_ptrs prim_ptrs;
  struct pkt_data dummy_data;
  pid_t writer_pid = getpid();

  char *json_buf = NULL;
  int json_buf_off = 0;

#ifdef WITH_AVRO
  avro_writer_t avro_writer;
  char *avro_buf = NULL;
  int avro_buffer_full = FALSE;
#endif

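  /* set up the Kafka producer handle from the plugin configuration */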
  p_kafka_init_host(&kafkap_kafka_host, config.kafka_config_file);

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  if (!config.sql_table) config.sql_table = default_kafka_topic;
  else {
    if (strchr(config.sql_table, '$')) {
      is_topic_dyn = TRUE;
      orig_kafka_topic = config.sql_table;
    }
  }

  if (config.amqp_routing_key_rr) orig_kafka_topic = config.sql_table;

  p_kafka_init_topic_rr(&kafkap_kafka_host);
  p_kafka_set_topic_rr(&kafkap_kafka_host, config.amqp_routing_key_rr);

  empty_pcust = malloc(config.cpptrs.len);
  if (!empty_pcust) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() empty_pcust. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  memset(&empty_pbgp, 0, sizeof(struct pkt_bgp_primitives));
  memset(&empty_pnat, 0, sizeof(struct pkt_nat_primitives));
  memset(&empty_pmpls, 0, sizeof(struct pkt_mpls_primitives));
  memset(&empty_ptun, 0, sizeof(struct pkt_tunnel_primitives));
  memset(empty_pcust, 0, config.cpptrs.len);
  memset(&prim_ptrs, 0, sizeof(prim_ptrs));
  memset(&dummy_data, 0, sizeof(dummy_data));
  memset(tmpbuf, 0, sizeof(tmpbuf));

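  /* connect as a producer and configure broker, topic and partitioning */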
  p_kafka_connect_to_produce(&kafkap_kafka_host);
  p_kafka_set_broker(&kafkap_kafka_host, config.sql_host, config.kafka_broker_port);
  if (config.kafka_partition_key && strchr(config.kafka_partition_key, '$')) dyn_partition_key = TRUE;
  else dyn_partition_key = FALSE;

  if (!is_topic_dyn && !config.amqp_routing_key_rr) p_kafka_set_topic(&kafkap_kafka_host, config.sql_table);

  if (config.kafka_partition_dynamic && !config.kafka_partition_key) {
    Log(LOG_ERR, "ERROR ( %s/%s ): kafka_partition_dynamic needs a kafka_partition_key to operate. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }
  if (config.kafka_partition_dynamic && config.kafka_partition) {
    Log(LOG_ERR, "ERROR ( %s/%s ): kafka_partition_dynamic and kafka_partition are mutually exclusive. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  if (config.kafka_partition_dynamic) config.kafka_partition = RD_KAFKA_PARTITION_UA;

  p_kafka_set_partition(&kafkap_kafka_host, config.kafka_partition);

  if (!dyn_partition_key)
    p_kafka_set_key(&kafkap_kafka_host, config.kafka_partition_key, config.kafka_partition_keylen);

  if (config.message_broker_output & PRINT_OUTPUT_JSON) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_STR);
  else if (config.message_broker_output & PRINT_OUTPUT_AVRO) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_BIN);
  else {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unsupported kafka_output value specified. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

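  /* run the print-plugin preprocessing handlers; each may trim or invalidate queue entries */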
  for (j = 0, stop = 0; (!stop) && P_preprocess_funcs[j]; j++)
    stop = P_preprocess_funcs[j](queue, &index, j);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

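  /* optionally publish a start-of-purge marker message */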
  if (config.print_markers) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON || config.message_broker_output & PRINT_OUTPUT_AVRO) {
      void *json_obj;
      char *json_str = NULL;

      json_obj = compose_purge_init_json(config.name, writer_pid);

      if (json_obj) json_str = compose_json_str(json_obj);
      if (json_str) {
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

        free(json_str);
        json_str = NULL;
      }
    }
  }

  if (config.message_broker_output & PRINT_OUTPUT_JSON) {
    if (config.sql_multi_values) {
      json_buf = malloc(config.sql_multi_values);

      if (!json_buf) {
	Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (json_buf). Exiting ..\n", config.name, config.type);
	exit_plugin(1);
      }
      else memset(json_buf, 0, config.sql_multi_values);
    }
  }
  else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
    if (!config.avro_buffer_size) config.avro_buffer_size = LARGEBUFLEN;

    avro_buf = malloc(config.avro_buffer_size);

    if (!avro_buf) {
      Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (avro_buf). Exiting ..\n", config.name, config.type);
      exit_plugin(1);
    }
    else memset(avro_buf, 0, config.avro_buffer_size);

    avro_writer = avro_writer_memory(avro_buf, config.avro_buffer_size);
#endif
  }

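  /* encode each committed cache entry as JSON or Avro and publish it, batching when
     kafka_multi_values is in use */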
  for (j = 0; j < index; j++) {
    void *json_obj;
    char *json_str = NULL;

    if (queue[j]->valid != PRINT_CACHE_COMMITTED) continue;

    data = &queue[j]->primitives;
    if (queue[j]->pbgp) pbgp = queue[j]->pbgp;
    else pbgp = &empty_pbgp;

    if (queue[j]->pnat) pnat = queue[j]->pnat;
    else pnat = &empty_pnat;

    if (queue[j]->pmpls) pmpls = queue[j]->pmpls;
    else pmpls = &empty_pmpls;

    if (queue[j]->ptun) ptun = queue[j]->ptun;
    else ptun = &empty_ptun;

    if (queue[j]->pcust) pcust = queue[j]->pcust;
    else pcust = empty_pcust;

    if (queue[j]->pvlen) pvlen = queue[j]->pvlen;
    else pvlen = NULL;

    if (queue[j]->valid == PRINT_CACHE_FREE) continue;

    if (dyn_partition_key) {
      prim_ptrs.data = &dummy_data;
      primptrs_set_all_from_chained_cache(&prim_ptrs, queue[j]);
      memset(tmpbuf, 0, SRVBUFLEN);
      strlcpy(elem_part_key, config.kafka_partition_key, SRVBUFLEN);
      handle_dynname_internal_strings_same(tmpbuf, SRVBUFLEN, elem_part_key, &prim_ptrs);
      p_kafka_set_key(&kafkap_kafka_host, elem_part_key, strlen(elem_part_key));
    }

    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
#ifdef WITH_JANSSON
      json_t *json_obj = json_object();
      int idx;

      for (idx = 0; idx < N_PRIMITIVES && cjhandler[idx]; idx++) cjhandler[idx](json_obj, queue[j]);
      add_writer_name_and_pid_json(json_obj, config.name, writer_pid);

      json_str = compose_json_str(json_obj);
#endif
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      avro_value_iface_t *avro_iface = avro_generic_class_from_schema(avro_acct_schema);
      avro_value_t avro_value = compose_avro(config.what_to_count, config.what_to_count_2, queue[j]->flow_type,
                           &queue[j]->primitives, pbgp, pnat, pmpls, ptun, pcust, pvlen, queue[j]->bytes_counter,
                           queue[j]->packet_counter, queue[j]->flow_counter, queue[j]->tcp_flags,
                           &queue[j]->basetime, queue[j]->stitch, avro_iface);
      size_t avro_value_size;

      add_writer_name_and_pid_avro(avro_value, config.name, writer_pid);
      avro_value_sizeof(&avro_value, &avro_value_size);

      if (avro_value_size > config.avro_buffer_size) {
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: insufficient buffer size (avro_buffer_size=%u)\n",
            config.name, config.type, config.avro_buffer_size);
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: increase value or look for avro_buffer_size in CONFIG-KEYS document.\n\n",
            config.name, config.type);
        exit_plugin(1);
      }
      else if (avro_value_size >= (config.avro_buffer_size - avro_writer_tell(avro_writer))) {
        avro_buffer_full = TRUE;
        j--;
      }
      else if (avro_value_write(avro_writer, &avro_value)) {
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: unable to write value: %s\n",
            config.name, config.type, avro_strerror());
        exit_plugin(1);
      }
      else {
        mv_num++;
      }

      avro_value_decref(&avro_value);
      avro_value_iface_decref(avro_iface);
#else
      if (config.debug) Log(LOG_DEBUG, "DEBUG ( %s/%s ): compose_avro(): AVRO object not created due to missing --enable-avro\n", config.name, config.type);
#endif
    }

    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
      char *tmp_str = NULL;

      if (json_str && config.sql_multi_values) {
        int json_strlen = (strlen(json_str) ? (strlen(json_str) + 1) : 0);

	if (json_strlen >= (config.sql_multi_values - json_buf_off)) {
	  if (json_strlen >= config.sql_multi_values) {
	    Log(LOG_ERR, "ERROR ( %s/%s ): kafka_multi_values not large enough to store JSON elements. Exiting ..\n", config.name, config.type); 
	    exit(1);
	  }

	  tmp_str = json_str;
	  json_str = json_buf;
	}
	else {
	  strcat(json_buf, json_str);
	  mv_num++;

	  string_add_newline(json_buf);
	  json_buf_off = strlen(json_buf);

	  free(json_str);
	  json_str = NULL;
	}
      }

      if (json_str) {
        if (is_topic_dyn) {
          P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        if (config.amqp_routing_key_rr) {
          P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

	if (config.sql_multi_values) {
	  json_str = tmp_str;
	  strcpy(json_buf, json_str);

	  mv_num_save = mv_num;
	  mv_num = 1;

          string_add_newline(json_buf);
          json_buf_off = strlen(json_buf);
        }

        free(json_str);
        json_str = NULL;

        if (!ret) {
          if (!config.sql_multi_values) qn++;
          else qn += mv_num_save;
        }
        else break;
      }
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      if (!config.sql_multi_values || (mv_num >= config.sql_multi_values) || avro_buffer_full) {
        if (is_topic_dyn) {
          P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        if (config.amqp_routing_key_rr) {
          P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer));
        avro_writer_reset(avro_writer);
        avro_buffer_full = FALSE;
        mv_num_save = mv_num;
        mv_num = 0;

        if (!ret) qn += mv_num_save;
        else break;
      }
#endif
    }
  }

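  /* flush whatever is still buffered by multi-values batching */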
  if (config.sql_multi_values) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
      if (json_buf && json_buf_off) {
	/* no handling of dyn routing keys here: not compatible */
	Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_buf);
	ret = p_kafka_produce_data(&kafkap_kafka_host, json_buf, strlen(json_buf));

	if (!ret) qn += mv_num;
      }
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      if (avro_writer_tell(avro_writer)) {
        ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer));
        avro_writer_free(avro_writer);

        if (!ret) qn += mv_num;
      }
#endif
    }
  }

  duration = time(NULL)-start;

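  /* optionally publish an end-of-purge marker carrying counters and elapsed time */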
  if (config.print_markers) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON || config.message_broker_output & PRINT_OUTPUT_AVRO) {
      void *json_obj;
      char *json_str = NULL;

      json_obj = compose_purge_close_json(config.name, writer_pid, qn, saved_index, duration);

      if (json_obj) json_str = compose_json_str(json_obj);
      if (json_str) {
	sleep(1); /* Let's give a small delay to facilitate purge_close being
		     the last message in batch in case of partitioned topics */
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

        free(json_str);
        json_str = NULL;
      }
    }
  }

  p_kafka_close(&kafkap_kafka_host, FALSE);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n",
		config.name, config.type, writer_pid, qn, saved_index, duration);

  if (config.sql_trigger_exec && !safe_action) P_trigger_exec(config.sql_trigger_exec); 

  if (empty_pcust) free(empty_pcust);

  if (json_buf) free(json_buf);

#ifdef WITH_AVRO
  if (avro_buf) free(avro_buf);
#endif
}