void kafka_avro_schema_purge(char *avro_schema_str)
{
  struct p_kafka_host kafka_avro_schema_host;
  int part, part_cnt, tpc;

  if (!avro_schema_str || !config.kafka_avro_schema_topic) return;

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  p_kafka_init_host(&kafka_avro_schema_host, config.kafka_config_file);
  p_kafka_connect_to_produce(&kafka_avro_schema_host);
  p_kafka_set_broker(&kafka_avro_schema_host, config.sql_host, config.kafka_broker_port);
  p_kafka_set_topic(&kafka_avro_schema_host, config.kafka_avro_schema_topic);
  p_kafka_set_partition(&kafka_avro_schema_host, config.kafka_partition);
  p_kafka_set_key(&kafka_avro_schema_host, config.kafka_partition_key, config.kafka_partition_keylen);
  p_kafka_set_content_type(&kafka_avro_schema_host, PM_KAFKA_CNT_TYPE_STR);

  if (config.kafka_partition_dynamic) {
    rd_kafka_resp_err_t err;
    const struct rd_kafka_metadata *metadata;

    err = rd_kafka_metadata(kafka_avro_schema_host.rk, 0, kafka_avro_schema_host.topic, &metadata, 100);
    if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
      Log(LOG_ERR, "ERROR ( %s/%s ): Unable to get Kafka metadata for topic %s: %s\n",
          config.name, config.type, config.kafka_avro_schema_topic, rd_kafka_err2str(err));
      exit_plugin(1);
    }

    /* look the schema topic up in the returned metadata to get its partition count */
    part_cnt = -1;
    for (tpc = 0; tpc < metadata->topic_cnt; tpc++) {
      const struct rd_kafka_metadata_topic *t = &metadata->topics[tpc];

      if (!strcmp(t->topic, config.kafka_avro_schema_topic)) {
        part_cnt = t->partition_cnt;
        break;
      }
    }

    /* fan the schema out to every partition of the topic */
    for (part = 0; part < part_cnt; part++) {
      p_kafka_produce_data_to_part(&kafka_avro_schema_host, avro_schema_str, strlen(avro_schema_str), part);
    }

    rd_kafka_metadata_destroy(metadata); /* metadata is owned by the application and must be released */
  }
  else {
    p_kafka_produce_data(&kafka_avro_schema_host, avro_schema_str, strlen(avro_schema_str));
  }

  p_kafka_close(&kafka_avro_schema_host, FALSE);
}
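/*
 * Illustrative sketch only (hypothetical helper, not part of pmacct): the
 * dynamic-partition branch above boils down to "ask the broker how many
 * partitions the topic has, then produce once per partition". The metadata
 * lookup in isolation, assuming an existing rd_kafka_t handle and topic
 * handle from <librdkafka/rdkafka.h>, could look like this:
 */
static int pm_kafka_get_partition_cnt(rd_kafka_t *rk, rd_kafka_topic_t *rkt, const char *topic_name)
{
  const struct rd_kafka_metadata *metadata = NULL;
  int part_cnt = -1, tpc;

  /* metadata limited to a single topic; 100ms timeout, as in the code above */
  if (rd_kafka_metadata(rk, 0, rkt, &metadata, 100) != RD_KAFKA_RESP_ERR_NO_ERROR)
    return -1;

  for (tpc = 0; tpc < metadata->topic_cnt; tpc++) {
    if (!strcmp(metadata->topics[tpc].topic, topic_name)) {
      part_cnt = metadata->topics[tpc].partition_cnt;
      break;
    }
  }

  rd_kafka_metadata_destroy(metadata);

  return part_cnt;
}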
void kafka_avro_schema_purge(char *avro_schema_str)
{
  struct p_kafka_host kafka_avro_schema_host;

  if (!avro_schema_str || !config.kafka_avro_schema_topic) return;

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  p_kafka_init_host(&kafka_avro_schema_host, config.kafka_config_file);
  p_kafka_connect_to_produce(&kafka_avro_schema_host);
  p_kafka_set_broker(&kafka_avro_schema_host, config.sql_host, config.kafka_broker_port);
  p_kafka_set_topic(&kafka_avro_schema_host, config.kafka_avro_schema_topic);
  p_kafka_set_partition(&kafka_avro_schema_host, config.kafka_partition);
  p_kafka_set_key(&kafka_avro_schema_host, config.kafka_partition_key, config.kafka_partition_keylen);
  p_kafka_set_content_type(&kafka_avro_schema_host, PM_KAFKA_CNT_TYPE_STR);

  p_kafka_produce_data(&kafka_avro_schema_host, avro_schema_str, strlen(avro_schema_str));

  p_kafka_close(&kafka_avro_schema_host, FALSE);
}
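#ifdef WITH_AVRO
/*
 * Illustrative sketch only (hypothetical helper, not part of pmacct): a
 * caller of kafka_avro_schema_purge() needs the schema rendered as a JSON
 * string. With the Avro C API, one way to obtain it, assuming `schema` is a
 * valid avro_schema_t such as avro_acct_schema:
 */
static void pm_publish_avro_schema_example(avro_schema_t schema)
{
  static char buf[LARGEBUFLEN];
  avro_writer_t writer = avro_writer_memory(buf, sizeof(buf) - 1);

  /* avro_schema_to_json() returns 0 on success */
  if (!avro_schema_to_json(schema, writer)) {
    buf[avro_writer_tell(writer)] = '\0'; /* NUL-terminate the JSON rendering */
    kafka_avro_schema_purge(buf);
  }

  avro_writer_free(writer);
}
#endif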
void kafka_cache_purge(struct chained_cache *queue[], int index, int safe_action)
{
  struct pkt_primitives *data = NULL;
  struct pkt_bgp_primitives *pbgp = NULL;
  struct pkt_nat_primitives *pnat = NULL;
  struct pkt_mpls_primitives *pmpls = NULL;
  struct pkt_tunnel_primitives *ptun = NULL;
  char *pcust = NULL;
  struct pkt_vlen_hdr_primitives *pvlen = NULL;
  struct pkt_bgp_primitives empty_pbgp;
  struct pkt_nat_primitives empty_pnat;
  struct pkt_mpls_primitives empty_pmpls;
  struct pkt_tunnel_primitives empty_ptun;
  char *empty_pcust = NULL;
  char src_mac[18], dst_mac[18], src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN], ip_address[INET6_ADDRSTRLEN];
  char rd_str[SRVBUFLEN], misc_str[SRVBUFLEN], dyn_kafka_topic[SRVBUFLEN], *orig_kafka_topic = NULL;
  int i, j, stop, batch_idx, is_topic_dyn = FALSE, qn = 0, ret, saved_index = index;
  int mv_num = 0, mv_num_save = 0;
  time_t start, duration;
  pid_t writer_pid = getpid();
  char *json_buf = NULL;
  int json_buf_off = 0;

#ifdef WITH_AVRO
  avro_writer_t avro_writer;
  char *avro_buf = NULL;
  int avro_buffer_full = FALSE;
#endif

  p_kafka_init_host(&kafkap_kafka_host, config.kafka_config_file);

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  if (!config.sql_table) config.sql_table = default_kafka_topic;
  else {
    if (strchr(config.sql_table, '$')) {
      is_topic_dyn = TRUE;
      orig_kafka_topic = config.sql_table;
    }
  }

  if (config.amqp_routing_key_rr) orig_kafka_topic = config.sql_table;

  p_kafka_init_topic_rr(&kafkap_kafka_host);
  p_kafka_set_topic_rr(&kafkap_kafka_host, config.amqp_routing_key_rr);

  empty_pcust = malloc(config.cpptrs.len);
  if (!empty_pcust) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() empty_pcust. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  memset(&empty_pbgp, 0, sizeof(struct pkt_bgp_primitives));
  memset(&empty_pnat, 0, sizeof(struct pkt_nat_primitives));
  memset(&empty_pmpls, 0, sizeof(struct pkt_mpls_primitives));
  memset(&empty_ptun, 0, sizeof(struct pkt_tunnel_primitives));
  memset(empty_pcust, 0, config.cpptrs.len);

  p_kafka_connect_to_produce(&kafkap_kafka_host);
  p_kafka_set_broker(&kafkap_kafka_host, config.sql_host, config.kafka_broker_port);
  if (!is_topic_dyn && !config.amqp_routing_key_rr) p_kafka_set_topic(&kafkap_kafka_host, config.sql_table);
  p_kafka_set_partition(&kafkap_kafka_host, config.kafka_partition);
  p_kafka_set_key(&kafkap_kafka_host, config.kafka_partition_key, config.kafka_partition_keylen);

  if (config.message_broker_output & PRINT_OUTPUT_JSON) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_STR);
  else if (config.message_broker_output & PRINT_OUTPUT_AVRO) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_BIN);
  else {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unsupported kafka_output value specified. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  for (j = 0, stop = 0; (!stop) && P_preprocess_funcs[j]; j++)
    stop = P_preprocess_funcs[j](queue, &index, j);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

  if (config.print_markers) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON ||
        config.message_broker_output & PRINT_OUTPUT_AVRO) {
      void *json_obj;
      char *json_str = NULL;

      json_obj = compose_purge_init_json(config.name, writer_pid);
      if (json_obj) json_str = compose_json_str(json_obj);
      if (json_str) {
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

        free(json_str);
        json_str = NULL;
      }
    }
  }

  if (config.message_broker_output & PRINT_OUTPUT_JSON) {
    if (config.sql_multi_values) {
      json_buf = malloc(config.sql_multi_values);

      if (!json_buf) {
        Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (json_buf). Exiting ..\n", config.name, config.type);
        exit_plugin(1);
      }
      else memset(json_buf, 0, config.sql_multi_values);
    }
  }
  else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
    if (!config.avro_buffer_size) config.avro_buffer_size = LARGEBUFLEN;

    avro_buf = malloc(config.avro_buffer_size);

    if (!avro_buf) {
      Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (avro_buf). Exiting ..\n", config.name, config.type);
      exit_plugin(1);
    }
    else memset(avro_buf, 0, config.avro_buffer_size);

    avro_writer = avro_writer_memory(avro_buf, config.avro_buffer_size);
#endif
  }

  for (j = 0; j < index; j++) {
    void *json_obj;
    char *json_str = NULL;

    if (queue[j]->valid != PRINT_CACHE_COMMITTED) continue;

    data = &queue[j]->primitives;
    if (queue[j]->pbgp) pbgp = queue[j]->pbgp;
    else pbgp = &empty_pbgp;

    if (queue[j]->pnat) pnat = queue[j]->pnat;
    else pnat = &empty_pnat;

    if (queue[j]->pmpls) pmpls = queue[j]->pmpls;
    else pmpls = &empty_pmpls;

    if (queue[j]->ptun) ptun = queue[j]->ptun;
    else ptun = &empty_ptun;

    if (queue[j]->pcust) pcust = queue[j]->pcust;
    else pcust = empty_pcust;

    if (queue[j]->pvlen) pvlen = queue[j]->pvlen;
    else pvlen = NULL;

    if (queue[j]->valid == PRINT_CACHE_FREE) continue;

    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
#ifdef WITH_JANSSON
      json_t *json_obj = json_object();
      int idx;

      for (idx = 0; idx < N_PRIMITIVES && cjhandler[idx]; idx++) cjhandler[idx](json_obj, queue[j]);
      add_writer_name_and_pid_json(json_obj, config.name, writer_pid);

      json_str = compose_json_str(json_obj);
#endif
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      avro_value_iface_t *avro_iface = avro_generic_class_from_schema(avro_acct_schema);
      avro_value_t avro_value = compose_avro(config.what_to_count, config.what_to_count_2, queue[j]->flow_type,
                                             &queue[j]->primitives, pbgp, pnat, pmpls, ptun, pcust, pvlen,
                                             queue[j]->bytes_counter, queue[j]->packet_counter, queue[j]->flow_counter,
                                             queue[j]->tcp_flags, &queue[j]->basetime, queue[j]->stitch, avro_iface);
      size_t avro_value_size;

      add_writer_name_and_pid_avro(avro_value, config.name, writer_pid);
      avro_value_sizeof(&avro_value, &avro_value_size);

      if (avro_value_size > config.avro_buffer_size) {
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: insufficient buffer size (avro_buffer_size=%u)\n",
            config.name, config.type, config.avro_buffer_size);
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: increase value or look for avro_buffer_size in CONFIG-KEYS document.\n\n",
            config.name, config.type);
        exit_plugin(1);
      }
      else if (avro_value_size >= (config.avro_buffer_size - avro_writer_tell(avro_writer))) {
        /* no room left in the in-memory buffer: flush first, then retry this element */
        avro_buffer_full = TRUE;
        j--;
      }
      else if (avro_value_write(avro_writer, &avro_value)) {
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: unable to write value: %s\n",
            config.name, config.type, avro_strerror());
        exit_plugin(1);
      }
      else {
        mv_num++;
      }

      avro_value_decref(&avro_value);
      avro_value_iface_decref(avro_iface);
#else
      if (config.debug) Log(LOG_DEBUG, "DEBUG ( %s/%s ): compose_avro(): AVRO object not created due to missing --enable-avro\n", config.name, config.type);
#endif
    }

    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
      char *tmp_str = NULL;

      if (json_str && config.sql_multi_values) {
        int json_strlen = (strlen(json_str) ? (strlen(json_str) + 1) : 0);

        if (json_strlen >= (config.sql_multi_values - json_buf_off)) {
          if (json_strlen >= config.sql_multi_values) {
            Log(LOG_ERR, "ERROR ( %s/%s ): kafka_multi_values not large enough to store JSON elements. Exiting ..\n", config.name, config.type);
            exit(1);
          }

          /* buffer full: park the new element and hand the batch over for producing */
          tmp_str = json_str;
          json_str = json_buf;
        }
        else {
          strcat(json_buf, json_str);
          mv_num++;

          string_add_newline(json_buf);
          json_buf_off = strlen(json_buf);

          free(json_str);
          json_str = NULL;
        }
      }

      if (json_str) {
        if (is_topic_dyn) {
          P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        if (config.amqp_routing_key_rr) {
          P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

        if (config.sql_multi_values) {
          /* restart the batch with the element that did not fit */
          json_str = tmp_str;
          strcpy(json_buf, json_str);

          mv_num_save = mv_num;
          mv_num = 1;

          string_add_newline(json_buf);
          json_buf_off = strlen(json_buf);
        }

        free(json_str);
        json_str = NULL;

        if (!ret) {
          if (!config.sql_multi_values) qn++;
          else qn += mv_num_save;
        }
        else break;
      }
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      if (!config.sql_multi_values || (mv_num >= config.sql_multi_values) || avro_buffer_full) {
        if (is_topic_dyn) {
          P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        if (config.amqp_routing_key_rr) {
          P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer));
        avro_writer_reset(avro_writer);
        avro_buffer_full = FALSE;
        mv_num_save = mv_num;
        mv_num = 0;

        if (!ret) qn += mv_num_save;
        else break;
      }
#endif
    }
  }

  if (config.sql_multi_values) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
      if (json_buf && json_buf_off) {
        /* no handling of dyn routing keys here: not compatible */
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_buf);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_buf, strlen(json_buf));

        if (!ret) qn += mv_num;
      }
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      if (avro_writer_tell(avro_writer)) {
        ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer));
        avro_writer_free(avro_writer);

        if (!ret) qn += mv_num;
      }
#endif
    }
  }

  duration = time(NULL)-start;

  if (config.print_markers) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON ||
        config.message_broker_output & PRINT_OUTPUT_AVRO) {
      void *json_obj;
      char *json_str = NULL;

      json_obj = compose_purge_close_json(config.name, writer_pid, qn, saved_index, duration);
      if (json_obj) json_str = compose_json_str(json_obj);
      if (json_str) {
        /* Let's give a small delay to facilitate purge_close being the last
           message in batch in case of partitioned topics */
        sleep(1);

        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

        free(json_str);
        json_str = NULL;
      }
    }
  }

  p_kafka_close(&kafkap_kafka_host, FALSE);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n",
      config.name, config.type, writer_pid, qn, saved_index, duration);

  if (config.sql_trigger_exec && !safe_action) P_trigger_exec(config.sql_trigger_exec);

  if (empty_pcust) free(empty_pcust);

  if (json_buf) free(json_buf);

#ifdef WITH_AVRO
  if (avro_buf) free(avro_buf);
#endif
}
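/*
 * Illustrative sketch only (hypothetical helper, not part of pmacct): the JSON
 * kafka_multi_values path above accumulates newline-separated records in
 * json_buf and flushes when the next record would no longer fit. The same
 * accumulate-then-flush policy in isolation, with a caller-supplied flush
 * callback standing in for p_kafka_produce_data():
 */
static int pm_batch_append_example(char *buf, size_t buf_sz, size_t *off,
                                   const char *rec, int (*flush)(const char *, size_t))
{
  size_t rec_len = strlen(rec) + 1; /* +1 accounts for the trailing newline */

  if (rec_len >= buf_sz) return -1; /* a single record can never fit: hard error */

  /* flush the pending batch first if appending would overflow the buffer */
  if (rec_len >= (buf_sz - *off)) {
    if (flush(buf, *off)) return -1;
    *off = 0;
    buf[0] = '\0';
  }

  strcat(buf, rec);
  strcat(buf, "\n");
  *off = strlen(buf);

  return 0;
}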
void kafka_cache_purge(struct chained_cache *queue[], int index)
{
  struct pkt_primitives *data = NULL;
  struct pkt_bgp_primitives *pbgp = NULL;
  struct pkt_nat_primitives *pnat = NULL;
  struct pkt_mpls_primitives *pmpls = NULL;
  char *pcust = NULL;
  struct pkt_vlen_hdr_primitives *pvlen = NULL;
  struct pkt_bgp_primitives empty_pbgp;
  struct pkt_nat_primitives empty_pnat;
  struct pkt_mpls_primitives empty_pmpls;
  char *empty_pcust = NULL;
  char src_mac[18], dst_mac[18], src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN], ip_address[INET6_ADDRSTRLEN];
  char rd_str[SRVBUFLEN], misc_str[SRVBUFLEN], dyn_kafka_topic[SRVBUFLEN], *orig_kafka_topic = NULL;
  int i, j, stop, batch_idx, is_topic_dyn = FALSE, qn = 0, ret, saved_index = index;
  int mv_num = 0, mv_num_save = 0;
  time_t start, duration;
  pid_t writer_pid = getpid();

#ifdef WITH_JANSSON
  json_t *array = json_array();
#endif

  p_kafka_init_host(&kafkap_kafka_host);

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  if (!config.sql_table) config.sql_table = default_kafka_topic;
  else {
    if (strchr(config.sql_table, '$')) {
      is_topic_dyn = TRUE;
      orig_kafka_topic = config.sql_table;
      config.sql_table = dyn_kafka_topic;
    }
  }

  if (config.amqp_routing_key_rr) {
    orig_kafka_topic = config.sql_table;
    config.sql_table = dyn_kafka_topic;
  }

  p_kafka_init_topic_rr(&kafkap_kafka_host);
  p_kafka_set_topic_rr(&kafkap_kafka_host, config.amqp_routing_key_rr);

  empty_pcust = malloc(config.cpptrs.len);
  if (!empty_pcust) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() empty_pcust. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  memset(&empty_pbgp, 0, sizeof(struct pkt_bgp_primitives));
  memset(&empty_pnat, 0, sizeof(struct pkt_nat_primitives));
  memset(&empty_pmpls, 0, sizeof(struct pkt_mpls_primitives));
  memset(empty_pcust, 0, config.cpptrs.len);

  p_kafka_connect_to_produce(&kafkap_kafka_host);
  p_kafka_set_broker(&kafkap_kafka_host, config.sql_host, config.kafka_broker_port);
  p_kafka_set_topic(&kafkap_kafka_host, config.sql_table);
  p_kafka_set_partition(&kafkap_kafka_host, config.kafka_partition);
  p_kafka_set_key(&kafkap_kafka_host, config.kafka_partition_key, config.kafka_partition_keylen);
  p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_STR);

  for (j = 0, stop = 0; (!stop) && P_preprocess_funcs[j]; j++)
    stop = P_preprocess_funcs[j](queue, &index, j);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

  if (config.print_markers) {
    void *json_obj;
    char *json_str = NULL;

    json_obj = compose_purge_init_json(writer_pid);
    if (json_obj) json_str = compose_json_str(json_obj);
    if (json_str) {
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;
    }
  }

  for (j = 0; j < index; j++) {
    void *json_obj;
    char *json_str = NULL;

    if (queue[j]->valid != PRINT_CACHE_COMMITTED) continue;

    data = &queue[j]->primitives;
    if (queue[j]->pbgp) pbgp = queue[j]->pbgp;
    else pbgp = &empty_pbgp;

    if (queue[j]->pnat) pnat = queue[j]->pnat;
    else pnat = &empty_pnat;

    if (queue[j]->pmpls) pmpls = queue[j]->pmpls;
    else pmpls = &empty_pmpls;

    if (queue[j]->pcust) pcust = queue[j]->pcust;
    else pcust = empty_pcust;

    if (queue[j]->pvlen) pvlen = queue[j]->pvlen;
    else pvlen = NULL;

    if (queue[j]->valid == PRINT_CACHE_FREE) continue;

    json_obj = compose_json(config.what_to_count, config.what_to_count_2, queue[j]->flow_type,
                            &queue[j]->primitives, pbgp, pnat, pmpls, pcust, pvlen,
                            queue[j]->bytes_counter, queue[j]->packet_counter, queue[j]->flow_counter,
                            queue[j]->tcp_flags, &queue[j]->basetime, queue[j]->stitch);

    json_str = compose_json_str(json_obj);

#ifdef WITH_JANSSON
    if (json_str && config.sql_multi_values) {
      json_t *elem = NULL;
      char *tmp_str = json_str;
      int do_free = FALSE;

      /* dump and reset the batch once it holds sql_multi_values elements */
      if (json_array_size(array) >= config.sql_multi_values) {
        json_str = json_dumps(array, 0);
        json_array_clear(array);
        mv_num_save = mv_num;
        mv_num = 0;
      }
      else do_free = TRUE;

      elem = json_loads(tmp_str, 0, NULL);
      json_array_append_new(array, elem);
      mv_num++;

      if (do_free) {
        free(json_str);
        json_str = NULL;
      }
    }
#endif

    if (json_str) {
      if (is_topic_dyn) {
        P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
        p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
      }

      if (config.amqp_routing_key_rr) {
        P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
        p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
      }

      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;

      if (!ret) {
        if (!config.sql_multi_values) qn++;
        else qn += mv_num_save;
      }
      else break;
    }
  }

#ifdef WITH_JANSSON
  if (config.sql_multi_values && json_array_size(array)) {
    char *json_str;

    json_str = json_dumps(array, 0);
    json_array_clear(array);
    json_decref(array);

    if (json_str) {
      /* no handling of dyn routing keys here: not compatible */
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;

      if (!ret) qn += mv_num;
    }
  }
#endif

  duration = time(NULL)-start;

  if (config.print_markers) {
    void *json_obj;
    char *json_str = NULL;

    json_obj = compose_purge_close_json(writer_pid, qn, saved_index, duration);
    if (json_obj) json_str = compose_json_str(json_obj);
    if (json_str) {
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
      ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

      free(json_str);
      json_str = NULL;
    }
  }

  p_kafka_close(&kafkap_kafka_host, FALSE);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n",
      config.name, config.type, writer_pid, qn, saved_index, duration);

  if (config.sql_trigger_exec) P_trigger_exec(config.sql_trigger_exec);

  if (empty_pcust) free(empty_pcust);
}
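#ifdef WITH_JANSSON
/*
 * Illustrative sketch only (hypothetical helper, not part of pmacct): this
 * older implementation batches records by parsing each JSON string back into
 * a Jansson array and dumping the whole array once it reaches
 * sql_multi_values elements. The core of that pattern in isolation; the
 * returned string, when non-NULL, is a serialized batch the caller produces
 * to Kafka and then free()s:
 */
static char *pm_array_batch_add_example(json_t *array, const char *rec, size_t max_elems)
{
  char *batch = NULL;

  /* serialize and reset the batch before it would exceed max_elems */
  if (json_array_size(array) >= max_elems) {
    batch = json_dumps(array, 0);
    json_array_clear(array);
  }

  /* json_array_append_new() steals the reference returned by json_loads() */
  json_array_append_new(array, json_loads(rec, 0, NULL));

  return batch;
}
#endif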