int avro_bin_to_json(avro_schema_t schema,
                     const void *val_bin, size_t val_len,
                     char **val_out, size_t *val_len_out) {
    if (!val_bin) {
        *val_out = NULL;
        return 0;
    } else if (!schema) {
        log_error("json: got a value where we didn't expect one, and no schema to decode it");
        *val_out = NULL;
        return EINVAL;
    }

    avro_reader_t reader = avro_reader_memory(val_bin, val_len);
    avro_value_iface_t *iface = avro_generic_class_from_schema(schema);
    if (!iface) {
        log_error("json: error in avro_generic_class_from_schema: %s", avro_strerror());
        avro_reader_free(reader);
        return EINVAL;
    }

    int err;
    avro_value_t value;

    err = avro_generic_value_new(iface, &value);
    if (err) {
        log_error("json: error in avro_generic_value_new: %s", avro_strerror());
        avro_value_iface_decref(iface);
        avro_reader_free(reader);
        return err;
    }

    err = avro_value_read(reader, &value);
    if (err) {
        log_error("json: error decoding Avro value: %s", avro_strerror());
        avro_value_decref(&value);
        avro_value_iface_decref(iface);
        avro_reader_free(reader);
        return err;
    }

    err = avro_value_to_json(&value, 1, val_out);
    if (err) {
        log_error("json: error converting Avro value to JSON: %s", avro_strerror());
        avro_value_decref(&value);
        avro_value_iface_decref(iface);
        avro_reader_free(reader);
        return err;
    }

    /* Not including the null terminator -- to librdkafka it's just bytes. */
    *val_len_out = strlen(*val_out);

    avro_value_decref(&value);
    avro_value_iface_decref(iface);
    avro_reader_free(reader);
    return 0;
}
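/* A minimal usage sketch for avro_bin_to_json(), assuming a caller that
 * already holds a parsed schema and a binary-encoded Avro buffer. The names
 * `example_decode`, `buf`, and `buf_len` are illustrative, not from the
 * original. */
static void example_decode(avro_schema_t schema, const void *buf, size_t buf_len) {
    char *json = NULL;
    size_t json_len = 0;

    if (avro_bin_to_json(schema, buf, buf_len, &json, &json_len) == 0 && json) {
        printf("%.*s\n", (int) json_len, json);
        free(json);  /* avro_value_to_json() allocates the string with malloc() */
    }
}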
/* Decrements the reference counts of a schema list entry. */
void schema_list_entry_decrefs(schema_list_entry *entry) {
    avro_reader_free(entry->avro_reader);
    avro_value_decref(&entry->old_value);
    avro_value_decref(&entry->row_value);
    avro_value_iface_decref(entry->row_iface);
    avro_schema_decref(entry->row_schema);

    if (entry->key_schema) {
        avro_value_decref(&entry->key_value);
        avro_value_iface_decref(entry->key_iface);
        avro_schema_decref(entry->key_schema);
    }
}
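/* For reference, a plausible shape for schema_list_entry, inferred purely
 * from the fields the function above touches; the real definition lives
 * elsewhere in the project and may differ. */
typedef struct {
    avro_reader_t avro_reader;
    avro_schema_t key_schema;      /* may be NULL, hence the guard above */
    avro_value_iface_t *key_iface;
    avro_value_t key_value;
    avro_schema_t row_schema;
    avro_value_iface_t *row_iface;
    avro_value_t row_value;
    avro_value_t old_value;
} schema_list_entry;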
/* { "type": "record", "name": "FinishRequest", "fields": [ {"name": "succeed", "type": "boolean"}, {"name": "diagnostics", "type": "string"} ] } */ static void parse_finish_request(avro_slice_t *slice, bool *succeed, char **diag) { char filename[FILE_NAME_LEN]; avro_schema_t schema; avro_value_iface_t *iface; avro_value_t record, succeed_value, diag_value; size_t index; avro_reader_t reader; size_t size; sprintf(filename, "%s/%s", SCHEMA_PATH, "FinishRequestRecordAvro.avsc"); init_schema(filename, &schema); iface = avro_generic_class_from_schema(schema); avro_generic_value_new(iface, &record); reader = avro_reader_memory(slice->buffer, slice->len); if (avro_value_read(reader, &record)) { fprintf(stderr, "Unable to read record from memory buffer\n"); fprintf(stderr, "Error: %s\n", avro_strerror()); exit(1); } avro_value_get_by_name(&record, "succeed", &succeed_value, &index); avro_value_get_boolean(&succeed_value, succeed); avro_value_get_by_name(&record, "diagnostics", &diag_value, &index); avro_value_get_string(&diag_value, diag, &size); //avro_generic_value_free(&record); avro_value_iface_decref(iface); avro_schema_decref(schema); }
/* Releases a value: first the value's own storage, via the implementation's
 * decref method, then the reference the value holds on its implementation. */
void avro_value_decref(avro_value_t *value) {
    value->iface->decref(value);
    avro_value_iface_decref(value->iface);
    value->iface = NULL;
    value->self = NULL;
}
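/* The typical lifecycle this function fits into, as a minimal sketch
 * assuming a parsed `schema`.  Note that avro_value_decref() drops the
 * value's own reference on the class, but the caller still owns the
 * reference returned by avro_generic_class_from_schema(). */
static int value_lifecycle(avro_schema_t schema) {
    avro_value_iface_t *iface = avro_generic_class_from_schema(schema);
    avro_value_t value;
    if (avro_generic_value_new(iface, &value)) {
        avro_value_iface_decref(iface);
        return -1;
    }
    /* ... populate and use `value` ... */
    avro_value_decref(&value);       /* releases the value's storage */
    avro_value_iface_decref(iface);  /* drops the caller's class reference */
    return 0;
}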
/*
typedef struct {
    int jobid;
    int vpid;
} process_name_t;

typedef struct {
    char *en_vars;
    char *args;
    char *host_name;
    process_name_t proc_name;
} launch_context_t;

typedef struct {
    bool is_successful;
    process_name_t proc_name;
} launch_response_t;
*/
static void build_launch_response(launch_response_t *launch_response_array, int array_size, avro_slice_t **slice) {
    char filename[FILE_NAME_LEN];
    char buf[BUFFER_SIZE];
    long len = 0;
    avro_schema_t schema;
    avro_value_iface_t *iface;
    avro_value_t record;
    avro_value_t results_value, LaunchResult_value, is_successful_value,
                 name_value, jobid_value, vpid_value;
    size_t index;
    int i;
    avro_writer_t writer;

    snprintf(filename, sizeof(filename), "%s/%s", SCHEMA_PATH, "LaunchResponseRecordAvro.avsc");
    init_schema(filename, &schema);

    iface = avro_generic_class_from_schema(schema);
    avro_generic_value_new(iface, &record);

    avro_value_get_by_name(&record, "results", &results_value, &index);

    for (i = 0; i < array_size; i++) {
        avro_value_append(&results_value, &LaunchResult_value, &index);

        avro_value_get_by_name(&LaunchResult_value, "is_successful", &is_successful_value, &index);
        avro_value_set_boolean(&is_successful_value, launch_response_array[i].is_successful);

        avro_value_get_by_name(&LaunchResult_value, "name", &name_value, &index);
        avro_value_get_by_name(&name_value, "jobid", &jobid_value, &index);
        avro_value_set_int(&jobid_value, launch_response_array[i].proc_name.jobid);
        avro_value_get_by_name(&name_value, "vpid", &vpid_value, &index);
        avro_value_set_int(&vpid_value, launch_response_array[i].proc_name.vpid);
    }

    /* create a writer backed by a memory buffer */
    writer = avro_writer_memory(buf, sizeof(buf));

    /* write the record into the buffer */
    if (avro_value_write(writer, &record)) {
        fprintf(stderr, "Unable to write record to memory buffer\n");
        fprintf(stderr, "Error: %s\n", avro_strerror());
        exit(1);
    }

    avro_writer_flush(writer);
    len = avro_writer_tell(writer);

    avro_value_decref(&record);  /* was left commented out, leaking the record */
    avro_value_iface_decref(iface);
    avro_schema_decref(schema);
    avro_writer_free(writer);    /* was leaked in the original */

    *slice = xmalloc(sizeof(avro_slice_t));
    (*slice)->buffer = xmalloc(len);
    (*slice)->len = len;
    memcpy((*slice)->buffer, buf, len);
}
static void output_avro_shutdown(LogicalDecodingContext *ctx) {
    plugin_state *state = ctx->output_plugin_private;
    MemoryContextDelete(state->memctx);

    schema_cache_free(state->schema_cache);
    avro_value_decref(&state->frame_value);
    avro_value_iface_decref(state->frame_iface);
    avro_schema_decref(state->frame_schema);
}
void nds_avro_cleanup() {
    if (ndsschemabuffer != NULL) {
        free(ndsschemabuffer);
        ndsschemabuffer = NULL;
    }
    if (iface != NULL) {
        avro_value_iface_decref(iface);
        iface = NULL;
    }
    schema_file_parsed = FALSE;
}
void process_file(FILE *input, avro_file_writer_t out, avro_schema_t schema,
                  int verbose, int memstat, int errabort, int strjson, size_t max_str_sz) {
    json_error_t err;
    json_t *json;
    int n = 0;

    json = json_loadf(input, JSON_DISABLE_EOF_CHECK, &err);

    while (!feof(input)) {
        n++;

        if (verbose && !(n % 1000))
            printf("Processing record %d\n", n);

        if (!json) {
            if (errabort) {
                fprintf(stderr, "JSON error on line %d, column %d, pos %d: %s, aborting.\n",
                        n, err.column, err.position, err.text);
                return;
            }
            fprintf(stderr, "JSON error on line %d, column %d, pos %d: %s, skipping to EOL\n",
                    n, err.column, err.position, err.text);
            /* skip to the end of the malformed line and try the next one */
            while (getc(input) != '\n' && !feof(input)) {};
            json = json_loadf(input, JSON_DISABLE_EOF_CHECK, &err);
            continue;
        }

        avro_value_t record;
        avro_value_iface_t *iface = avro_generic_class_from_schema(schema);
        avro_generic_value_new(iface, &record);

        if (!schema_traverse(schema, json, NULL, &record, 0, strjson, max_str_sz)) {
            if (avro_file_writer_append_value(out, &record)) {
                fprintf(stderr, "ERROR: avro_file_writer_append_value() FAILED: %s\n", avro_strerror());
                exit(EXIT_FAILURE);
            }
        } else
            fprintf(stderr, "Error processing record %d, skipping...\n", n);

        avro_value_iface_decref(iface);
        avro_value_decref(&record);
        json_decref(json);

        if (memstat && !(n % 1000))
            memory_status();

        json = json_loadf(input, JSON_DISABLE_EOF_CHECK, &err);
    }

    if (memstat)
        memory_status();

    avro_schema_decref(schema);
}
static int l_schema_gc(lua_State *L) {
    LuaAvroSchema *l_schema = luaL_checkudata(L, 1, MT_AVRO_SCHEMA);
    if (l_schema->schema != NULL) {
        avro_schema_decref(l_schema->schema);
        l_schema->schema = NULL;
    }
    if (l_schema->iface != NULL) {
        avro_value_iface_decref(l_schema->iface);
        l_schema->iface = NULL;
    }
    return 0;
}
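/* A sketch of how this finalizer might be wired up, following the usual
 * luaL pattern; MT_AVRO_SCHEMA and l_schema_gc come from the original, the
 * registration function itself is illustrative. */
static void l_schema_register(lua_State *L) {
    luaL_newmetatable(L, MT_AVRO_SCHEMA);  /* create (or fetch) the metatable */
    lua_pushcfunction(L, l_schema_gc);
    lua_setfield(L, -2, "__gc");           /* run l_schema_gc on collection */
    lua_pop(L, 1);
}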
static void process_file(const char *in_filename, const char *out_filename) {
    avro_file_reader_t reader;
    avro_file_writer_t writer;

    if (in_filename == NULL) {
        if (avro_file_reader_fp(stdin, "<stdin>", 0, &reader)) {
            fprintf(stderr, "Error opening <stdin>:\n %s\n", avro_strerror());
            exit(1);
        }
    } else {
        if (avro_file_reader(in_filename, &reader)) {
            fprintf(stderr, "Error opening %s:\n %s\n", in_filename, avro_strerror());
            exit(1);
        }
    }

    avro_schema_t wschema;
    avro_value_iface_t *iface;
    avro_value_t value;

    wschema = avro_file_reader_get_writer_schema(reader);
    iface = avro_generic_class_from_schema(wschema);
    avro_generic_value_new(iface, &value);

    if (avro_file_writer_create_with_codec
        (out_filename, wschema, &writer, codec, block_size)) {
        fprintf(stderr, "Error creating %s:\n %s\n", out_filename, avro_strerror());
        exit(1);
    }

    while (avro_file_reader_read_value(reader, &value) == 0) {
        if (avro_file_writer_append_value(writer, &value)) {
            fprintf(stderr, "Error writing to %s:\n %s\n", out_filename, avro_strerror());
            exit(1);
        }
        avro_value_reset(&value);
    }

    avro_file_reader_close(reader);
    avro_file_writer_close(writer);
    avro_value_decref(&value);
    avro_value_iface_decref(iface);
    avro_schema_decref(wschema);
}
/* Frees all the memory structures associated with a frame reader. */
void frame_reader_free(frame_reader_t reader) {
    avro_reader_free(reader->avro_reader);
    avro_value_decref(&reader->frame_value);
    avro_value_iface_decref(reader->frame_iface);
    avro_schema_decref(reader->frame_schema);

    for (int i = 0; i < reader->num_schemas; i++) {
        schema_list_entry *entry = reader->schemas[i];
        schema_list_entry_decrefs(entry);
        free(entry);
    }

    free(reader->schemas);
    free(reader);
}
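/* A plausible shape for the frame reader being torn down above, again
 * inferred only from the fields it touches; the real definition (and the
 * frame_reader_t typedef) live in the project's headers and may differ. */
struct frame_reader {
    avro_schema_t frame_schema;
    avro_value_iface_t *frame_iface;
    avro_value_t frame_value;
    avro_reader_t avro_reader;
    int num_schemas;
    schema_list_entry **schemas;
};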
static int do_close(AvroDeserializer *self) {
    if (self->flags & DESERIALIZER_READER_OK) {
        avro_reader_free(self->datum_reader);
        self->flags &= ~DESERIALIZER_READER_OK;
    }
    if (self->flags & DESERIALIZER_SCHEMA_OK) {
        avro_schema_decref(self->schema);
        self->flags &= ~DESERIALIZER_SCHEMA_OK;
    }
    if (self->iface != NULL) {
        avro_value_iface_decref(self->iface);
        self->iface = NULL;
    }
    return 0;
}
static int read_data() {
    int rval;
    int records_read = 0;
    avro_file_reader_t reader;
    avro_value_iface_t *iface;
    avro_value_t value;

    fprintf(stderr, "\nReading...\n");

    rval = avro_file_reader(filename, &reader);
    if (rval) {
        fprintf(stderr, "Error: %s\n", avro_strerror());
        return -1;
    }

    avro_schema_t schema = avro_file_reader_get_writer_schema(reader);
    iface = avro_generic_class_from_schema(schema);
    avro_generic_value_new(iface, &value);

    while ((rval = avro_file_reader_read_value(reader, &value)) == 0) {
        avro_value_t field;
        int32_t val;

        avro_value_get_by_index(&value, 0, &field, NULL);
        avro_value_get_int(&field, &val);
        fprintf(stderr, "value = %d\n", val);
        records_read++;
        avro_value_reset(&value);
    }

    avro_value_decref(&value);
    avro_value_iface_decref(iface);
    avro_schema_decref(schema);
    avro_file_reader_close(reader);

    fprintf(stderr, "read %d records.\n", records_read);

    if (rval != EOF) {
        fprintf(stderr, "Error: %s\n", avro_strerror());
        return -1;
    }
    return records_read;
}
static int write_data(int n_records) {
    int i;
    avro_schema_t schema;
    avro_schema_error_t error;
    avro_file_writer_t writer;
    avro_value_iface_t *iface;
    avro_value_t value;

    fprintf(stderr, "\nWriting...\n");

    if (avro_schema_from_json(PERSON_SCHEMA, 0, &schema, &error)) {
        fprintf(stderr, "Unable to parse schema\n");
        return -1;
    }

    if (avro_file_writer_create(filename, schema, &writer)) {
        fprintf(stderr, "There was an error creating file: %s\n", avro_strerror());
        return -1;
    }

    iface = avro_generic_class_from_schema(schema);
    avro_generic_value_new(iface, &value);

    /* Populate the single field once; the same value is appended repeatedly. */
    avro_value_t field;
    avro_value_get_by_index(&value, 0, &field, NULL);
    avro_value_set_int(&field, 123);

    for (i = 0; i < n_records; i++) {
        if (avro_file_writer_append_value(writer, &value)) {
            fprintf(stderr, "There was an error writing file: %s\n", avro_strerror());
            return -1;
        }
    }

    if (avro_file_writer_close(writer)) {
        fprintf(stderr, "There was an error closing file: %s\n", avro_strerror());
        return -1;
    }

    avro_value_decref(&value);
    avro_value_iface_decref(iface);
    avro_schema_decref(schema);
    return n_records;
}
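/* A minimal driver pairing write_data() and read_data() above, assuming the
 * file-scope `filename` they both use; illustrative only. */
int main(void) {
    const int expected = 10;
    if (write_data(expected) != expected) return EXIT_FAILURE;
    return (read_data() == expected) ? EXIT_SUCCESS : EXIT_FAILURE;
}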
static void process_file(const char *filename) {
    avro_file_reader_t reader;

    if (filename == NULL) {
        if (avro_file_reader_fp(stdin, "<stdin>", 0, &reader)) {
            fprintf(stderr, "Error opening <stdin>:\n %s\n", avro_strerror());
            exit(1);
        }
    } else {
        if (avro_file_reader(filename, &reader)) {
            fprintf(stderr, "Error opening %s:\n %s\n", filename, avro_strerror());
            exit(1);
        }
    }

    avro_schema_t wschema;
    avro_value_iface_t *iface;
    avro_value_t value;

    wschema = avro_file_reader_get_writer_schema(reader);
    iface = avro_generic_class_from_schema(wschema);
    avro_generic_value_new(iface, &value);

    while (avro_file_reader_read_value(reader, &value) == 0) {
        char *json;

        if (avro_value_to_json(&value, 1, &json)) {
            fprintf(stderr, "Error converting value to JSON: %s\n", avro_strerror());
        } else {
            printf("%s\n", json);
            free(json);
        }

        avro_value_reset(&value);
    }

    avro_file_reader_close(reader);
    avro_value_decref(&value);
    avro_value_iface_decref(iface);
    avro_schema_decref(wschema);  /* was missing: get_writer_schema() returns a new reference */
}
extern int parse_heartbeat_request(avro_slice_t *slice) {
    char filename[FILE_NAME_LEN];
    avro_schema_t schema;
    avro_value_iface_t *iface;
    avro_value_t record;
    avro_reader_t reader;
    size_t size = 0;

    snprintf(filename, sizeof(filename), "%s/%s", SCHEMA_PATH, "HeartBeatRequestRecordAvro.avsc");
    init_schema(filename, &schema);

    iface = avro_generic_class_from_schema(schema);
    avro_generic_value_new(iface, &record);

    reader = avro_reader_memory(slice->buffer, slice->len);
    if (avro_value_read(reader, &record)) {
        fprintf(stderr, "Unable to read record from memory buffer\n");
        fprintf(stderr, "Error: %s\n", avro_strerror());
        exit(1);
    }

    avro_value_get_size(&record, &size);

    avro_reader_free(reader);    /* was leaked in the original */
    avro_value_decref(&record);  /* likewise */
    avro_value_iface_decref(iface);
    avro_schema_decref(schema);

    /* A heartbeat request record carries no fields, so an empty record
     * (size == 0) means a well-formed request. */
    if (size == 0) {
        return 0;
    } else {
        return -1;
    }
}
static void AvroFileReader_dealloc(AvroFileReader *self) {
    if (self->iface != NULL) {
        avro_value_iface_decref(self->iface);
    }
    if (self->flags & AVROFILE_SCHEMA_OK) {
        avro_schema_decref(self->schema);
        Py_CLEAR(self->schema_json);
    }
    if (self->pyfile != NULL) {
        if (is_open(self)) {
            avro_file_reader_close(self->reader);
        }
        Py_CLEAR(self->pyfile);
    }
    self->ob_type->tp_free((PyObject *)self);
}
static void read_using_writer_schema(const char *filename) {
    avro_file_reader_t file;
    avro_schema_t writer_schema;
    avro_value_iface_t *writer_iface;
    avro_value_t writer_value;

    // Open an Avro file and grab the writer schema that was used to create the
    // file.
    check_i(avro_file_reader(filename, &file));
    writer_schema = avro_file_reader_get_writer_schema(file);

    // Then create a value that is an instance of the writer schema.  As above,
    // we use the built-in "generic" value implementation for the value instance
    // that will actually store the data.
    check_p(writer_iface = avro_generic_class_from_schema(writer_schema));
    check_i(avro_generic_value_new(writer_iface, &writer_value));

    // Read values from the file until we run out, printing the contents of each
    // one.  Here, we can read directly into `writer_value` since we know that
    // it's an instance of the schema that was used to create the file.
    while (avro_file_reader_read_value(file, &writer_value) == 0) {
        avro_value_t field;
        int32_t a;
        int32_t b;

        check_i(avro_value_get_by_name(&writer_value, "a", &field, NULL));
        check_i(avro_value_get_int(&field, &a));
        check_i(avro_value_get_by_name(&writer_value, "b", &field, NULL));
        check_i(avro_value_get_int(&field, &b));
        printf(" a: %" PRId32 ", b: %" PRId32 "\n", a, b);
    }

    // Close the file and clean up after ourselves.
    avro_file_reader_close(file);
    avro_value_decref(&writer_value);
    avro_value_iface_decref(writer_iface);
    avro_schema_decref(writer_schema);
}
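/* check_i() and check_p() are error-handling helpers defined elsewhere in
 * the original program.  Plausible (illustrative, not the original)
 * definitions abort on an Avro error: */
#define check_i(call) \
    do { if ((call) != 0) { fprintf(stderr, "Error: %s\n", avro_strerror()); exit(1); } } while (0)
#define check_p(call) \
    do { if ((call) == NULL) { fprintf(stderr, "Error: %s\n", avro_strerror()); exit(1); } } while (0)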
int main(void) {
    avro_schema_t schema = NULL;
    avro_schema_error_t error;
    avro_value_iface_t *simple_array_class;
    avro_value_t simple;

    /* Initialize the schema structure from JSON */
    if (avro_schema_from_json(SIMPLE_ARRAY, sizeof(SIMPLE_ARRAY), &schema, &error)) {
        fprintf(stdout, "Unable to parse schema\n");
        exit(EXIT_FAILURE);
    }

    // Create avro class and value
    simple_array_class = avro_generic_class_from_schema(schema);
    if (simple_array_class == NULL) {
        fprintf(stdout, "Unable to create simple array class\n");
        exit(EXIT_FAILURE);
    }

    if (avro_generic_value_new(simple_array_class, &simple)) {
        fprintf(stdout, "Error creating instance of record\n");
        exit(EXIT_FAILURE);
    }

    // Release the avro class and value
    avro_value_decref(&simple);
    avro_value_iface_decref(simple_array_class);
    avro_schema_decref(schema);

    return 0;
}
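/* SIMPLE_ARRAY is defined elsewhere in the original; since it is passed by
 * sizeof() it is presumably a char array holding an array schema, something
 * like this (illustrative): */
static const char SIMPLE_ARRAY[] =
    "{\"type\": \"array\", \"items\": {\"type\": \"int\"}}";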
int process_file(const char *in_filename, const char *out_filename) {
    avro_file_reader_t reader;
    avro_file_writer_t writer;

    if (in_filename == NULL) {
        if (avro_file_reader_fp(stdin, "<stdin>", 0, &reader)) {
            fprintf(stderr, "Error opening <stdin>:\n %s\n", avro_strerror());
            return 1;
        }
    } else {
        if (avro_file_reader(in_filename, &reader)) {
            fprintf(stderr, "Error opening %s:\n %s\n", in_filename, avro_strerror());
            return 1;
        }
    }

    avro_schema_t wschema;
    wschema = avro_file_reader_get_writer_schema(reader);

    /* Check that the reader schema is the same as the writer schema */
    {
        avro_schema_t oschema;
        avro_file_reader_t oreader;

        if (avro_file_reader(out_filename, &oreader)) {
            fprintf(stderr, "Error opening %s:\n %s\n", out_filename, avro_strerror());
            avro_file_reader_close(reader);
            return 1;
        }

        oschema = avro_file_reader_get_writer_schema(oreader);

        if (avro_schema_equal(oschema, wschema) == 0) {
            fprintf(stderr, "Error: reader and writer schema are not equal.\n");
            avro_schema_decref(oschema);  /* was leaked on this path */
            avro_file_reader_close(oreader);
            avro_file_reader_close(reader);
            return 1;
        }

        avro_file_reader_close(oreader);
        avro_schema_decref(oschema);
    }

    if (avro_file_writer_open(out_filename, &writer)) {
        fprintf(stderr, "Error opening %s:\n %s\n", out_filename, avro_strerror());
        avro_file_reader_close(reader);
        return 1;
    }

    avro_value_iface_t *iface;
    avro_value_t value;

    iface = avro_generic_class_from_schema(wschema);
    avro_generic_value_new(iface, &value);

    int rc = 0;
    while (avro_file_reader_read_value(reader, &value) == 0) {
        if (avro_file_writer_append_value(writer, &value)) {
            fprintf(stderr, "Error writing to %s:\n %s\n", out_filename, avro_strerror());
            rc = 1;  /* fall through to the shared cleanup instead of leaking */
            break;
        }
        avro_value_reset(&value);
    }

    avro_file_reader_close(reader);
    avro_file_writer_close(writer);
    avro_value_decref(&value);
    avro_value_iface_decref(iface);
    avro_schema_decref(wschema);
    return rc;
}
static void process_file(const char *filename) {
    avro_file_reader_t reader;
    FILE *fp;
    int should_close;

    if (filename == NULL) {
        fp = stdin;
        filename = "<stdin>";
        should_close = 0;
    } else {
        fp = fopen(filename, "rb");
        should_close = 1;

        if (fp == NULL) {
            fprintf(stderr, "Error opening %s:\n %s\n", filename, strerror(errno));
            exit(1);
        }
    }

    if (avro_file_reader_fp(fp, filename, 0, &reader)) {
        fprintf(stderr, "Error opening %s:\n %s\n", filename, avro_strerror());
        if (should_close) {
            fclose(fp);
        }
        exit(1);
    }

    avro_schema_t wschema;
    avro_value_iface_t *iface;
    avro_value_t value;

    wschema = avro_file_reader_get_writer_schema(reader);
    iface = avro_generic_class_from_schema(wschema);
    avro_generic_value_new(iface, &value);

    int rval;

    while ((rval = avro_file_reader_read_value(reader, &value)) == 0) {
        char *json;

        if (avro_value_to_json(&value, 1, &json)) {
            fprintf(stderr, "Error converting value to JSON: %s\n", avro_strerror());
        } else {
            printf("%s\n", json);
            free(json);
        }

        avro_value_reset(&value);
    }

    // If it was not an EOF that caused it to fail, print the error.
    if (rval != EOF) {
        fprintf(stderr, "Error: %s\n", avro_strerror());
    }

    avro_file_reader_close(reader);
    avro_value_decref(&value);
    avro_value_iface_decref(iface);
    avro_schema_decref(wschema);

    if (should_close) {
        fclose(fp);
    }
}
void kafka_cache_purge(struct chained_cache *queue[], int index, int safe_action)
{
  struct pkt_primitives *data = NULL;
  struct pkt_bgp_primitives *pbgp = NULL;
  struct pkt_nat_primitives *pnat = NULL;
  struct pkt_mpls_primitives *pmpls = NULL;
  struct pkt_tunnel_primitives *ptun = NULL;
  char *pcust = NULL;
  struct pkt_vlen_hdr_primitives *pvlen = NULL;
  struct pkt_bgp_primitives empty_pbgp;
  struct pkt_nat_primitives empty_pnat;
  struct pkt_mpls_primitives empty_pmpls;
  struct pkt_tunnel_primitives empty_ptun;
  char *empty_pcust = NULL;
  char src_mac[18], dst_mac[18], src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN], ip_address[INET6_ADDRSTRLEN];
  char rd_str[SRVBUFLEN], misc_str[SRVBUFLEN], dyn_kafka_topic[SRVBUFLEN], *orig_kafka_topic = NULL;
  int i, j, stop, batch_idx, is_topic_dyn = FALSE, qn = 0, ret, saved_index = index;
  int mv_num = 0, mv_num_save = 0;
  time_t start, duration;
  pid_t writer_pid = getpid();

  char *json_buf = NULL;
  int json_buf_off = 0;

#ifdef WITH_AVRO
  avro_writer_t avro_writer;
  char *avro_buf = NULL;
  int avro_buffer_full = FALSE;
#endif

  p_kafka_init_host(&kafkap_kafka_host, config.kafka_config_file);

  /* setting some defaults */
  if (!config.sql_host) config.sql_host = default_kafka_broker_host;
  if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port;

  if (!config.sql_table) config.sql_table = default_kafka_topic;
  else {
    if (strchr(config.sql_table, '$')) {
      is_topic_dyn = TRUE;
      orig_kafka_topic = config.sql_table;
    }
  }
  if (config.amqp_routing_key_rr) orig_kafka_topic = config.sql_table;

  p_kafka_init_topic_rr(&kafkap_kafka_host);
  p_kafka_set_topic_rr(&kafkap_kafka_host, config.amqp_routing_key_rr);

  empty_pcust = malloc(config.cpptrs.len);
  if (!empty_pcust) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() empty_pcust. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  memset(&empty_pbgp, 0, sizeof(struct pkt_bgp_primitives));
  memset(&empty_pnat, 0, sizeof(struct pkt_nat_primitives));
  memset(&empty_pmpls, 0, sizeof(struct pkt_mpls_primitives));
  memset(&empty_ptun, 0, sizeof(struct pkt_tunnel_primitives));
  memset(empty_pcust, 0, config.cpptrs.len);

  p_kafka_connect_to_produce(&kafkap_kafka_host);
  p_kafka_set_broker(&kafkap_kafka_host, config.sql_host, config.kafka_broker_port);
  if (!is_topic_dyn && !config.amqp_routing_key_rr) p_kafka_set_topic(&kafkap_kafka_host, config.sql_table);
  p_kafka_set_partition(&kafkap_kafka_host, config.kafka_partition);
  p_kafka_set_key(&kafkap_kafka_host, config.kafka_partition_key, config.kafka_partition_keylen);

  if (config.message_broker_output & PRINT_OUTPUT_JSON) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_STR);
  else if (config.message_broker_output & PRINT_OUTPUT_AVRO) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_BIN);
  else {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unsupported kafka_output value specified. Exiting.\n", config.name, config.type);
    exit_plugin(1);
  }

  for (j = 0, stop = 0; (!stop) && P_preprocess_funcs[j]; j++)
    stop = P_preprocess_funcs[j](queue, &index, j);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid);
  start = time(NULL);

  if (config.print_markers) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON || config.message_broker_output & PRINT_OUTPUT_AVRO) {
      void *json_obj;
      char *json_str = NULL; /* initialized: was read uninitialized when compose_purge_init_json() failed */

      json_obj = compose_purge_init_json(config.name, writer_pid);
      if (json_obj) json_str = compose_json_str(json_obj);
      if (json_str) {
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

        free(json_str);
        json_str = NULL;
      }
    }
  }

  if (config.message_broker_output & PRINT_OUTPUT_JSON) {
    if (config.sql_multi_values) {
      json_buf = malloc(config.sql_multi_values);

      if (!json_buf) {
        Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (json_buf). Exiting ..\n", config.name, config.type);
        exit_plugin(1);
      }
      else memset(json_buf, 0, config.sql_multi_values);
    }
  }
  else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
    if (!config.avro_buffer_size) config.avro_buffer_size = LARGEBUFLEN;

    avro_buf = malloc(config.avro_buffer_size);

    if (!avro_buf) {
      Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (avro_buf). Exiting ..\n", config.name, config.type);
      exit_plugin(1);
    }
    else memset(avro_buf, 0, config.avro_buffer_size);

    avro_writer = avro_writer_memory(avro_buf, config.avro_buffer_size);
#endif
  }

  for (j = 0; j < index; j++) {
    void *json_obj;
    char *json_str = NULL; /* initialized: only the WITH_JANSSON branch assigns it */

    if (queue[j]->valid != PRINT_CACHE_COMMITTED) continue;

    data = &queue[j]->primitives;
    if (queue[j]->pbgp) pbgp = queue[j]->pbgp;
    else pbgp = &empty_pbgp;

    if (queue[j]->pnat) pnat = queue[j]->pnat;
    else pnat = &empty_pnat;

    if (queue[j]->pmpls) pmpls = queue[j]->pmpls;
    else pmpls = &empty_pmpls;

    if (queue[j]->ptun) ptun = queue[j]->ptun;
    else ptun = &empty_ptun;

    if (queue[j]->pcust) pcust = queue[j]->pcust;
    else pcust = empty_pcust;

    if (queue[j]->pvlen) pvlen = queue[j]->pvlen;
    else pvlen = NULL;

    if (queue[j]->valid == PRINT_CACHE_FREE) continue;

    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
#ifdef WITH_JANSSON
      json_t *json_obj = json_object();
      int idx;

      for (idx = 0; idx < N_PRIMITIVES && cjhandler[idx]; idx++) cjhandler[idx](json_obj, queue[j]);
      add_writer_name_and_pid_json(json_obj, config.name, writer_pid);

      json_str = compose_json_str(json_obj);
#endif
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      avro_value_iface_t *avro_iface = avro_generic_class_from_schema(avro_acct_schema);
      avro_value_t avro_value = compose_avro(config.what_to_count, config.what_to_count_2, queue[j]->flow_type,
                        &queue[j]->primitives, pbgp, pnat, pmpls, ptun, pcust, pvlen, queue[j]->bytes_counter,
                        queue[j]->packet_counter, queue[j]->flow_counter, queue[j]->tcp_flags, &queue[j]->basetime,
                        queue[j]->stitch, avro_iface);
      size_t avro_value_size;

      add_writer_name_and_pid_avro(avro_value, config.name, writer_pid);
      avro_value_sizeof(&avro_value, &avro_value_size);

      if (avro_value_size > config.avro_buffer_size) {
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: insufficient buffer size (avro_buffer_size=%u)\n",
            config.name, config.type, config.avro_buffer_size);
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: increase value or look for avro_buffer_size in CONFIG-KEYS document.\n\n",
            config.name, config.type);
        exit_plugin(1);
      }
      else if (avro_value_size >= (config.avro_buffer_size - avro_writer_tell(avro_writer))) {
        avro_buffer_full = TRUE;
        j--; /* retry this record after the buffer has been flushed */
      }
      else if (avro_value_write(avro_writer, &avro_value)) {
        Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: unable to write value: %s\n",
            config.name, config.type, avro_strerror());
        exit_plugin(1);
      }
      else {
        mv_num++;
      }

      avro_value_decref(&avro_value);
      avro_value_iface_decref(avro_iface);
#else
      if (config.debug) Log(LOG_DEBUG, "DEBUG ( %s/%s ): compose_avro(): AVRO object not created due to missing --enable-avro\n",
                            config.name, config.type);
#endif
    }

    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
      char *tmp_str = NULL;

      if (json_str && config.sql_multi_values) {
        int json_strlen = (strlen(json_str) ? (strlen(json_str) + 1) : 0);

        if (json_strlen >= (config.sql_multi_values - json_buf_off)) {
          if (json_strlen >= config.sql_multi_values) {
            Log(LOG_ERR, "ERROR ( %s/%s ): kafka_multi_values not large enough to store JSON elements. Exiting ..\n",
                config.name, config.type);
            exit(1);
          }

          tmp_str = json_str;
          json_str = json_buf;
        }
        else {
          strcat(json_buf, json_str);
          mv_num++;

          string_add_newline(json_buf);
          json_buf_off = strlen(json_buf);

          free(json_str);
          json_str = NULL;
        }
      }

      if (json_str) {
        if (is_topic_dyn) {
          P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        if (config.amqp_routing_key_rr) {
          P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));

        if (config.sql_multi_values) {
          json_str = tmp_str;
          strcpy(json_buf, json_str);

          mv_num_save = mv_num;
          mv_num = 1;

          string_add_newline(json_buf);
          json_buf_off = strlen(json_buf);
        }

        free(json_str);
        json_str = NULL;

        if (!ret) {
          if (!config.sql_multi_values) qn++;
          else qn += mv_num_save;
        }
        else break;
      }
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      if (!config.sql_multi_values || (mv_num >= config.sql_multi_values) || avro_buffer_full) {
        if (is_topic_dyn) {
          P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        if (config.amqp_routing_key_rr) {
          P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr);
          p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic);
        }

        ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer));
        avro_writer_reset(avro_writer);
        avro_buffer_full = FALSE;

        mv_num_save = mv_num;
        mv_num = 0;

        if (!ret) qn += mv_num_save;
        else break;
      }
#endif
    }
  }

  if (config.sql_multi_values) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON) {
      if (json_buf && json_buf_off) {
        /* no handling of dyn routing keys here: not compatible */
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_buf);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_buf, strlen(json_buf));

        if (!ret) qn += mv_num;
      }
    }
    else if (config.message_broker_output & PRINT_OUTPUT_AVRO) {
#ifdef WITH_AVRO
      if (avro_writer_tell(avro_writer)) {
        ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer));
        avro_writer_free(avro_writer);

        if (!ret) qn += mv_num;
      }
#endif
    }
  }

  duration = time(NULL) - start;

  if (config.print_markers) {
    if (config.message_broker_output & PRINT_OUTPUT_JSON || config.message_broker_output & PRINT_OUTPUT_AVRO) {
      void *json_obj;
      char *json_str = NULL; /* initialized, as above */

      json_obj = compose_purge_close_json(config.name, writer_pid, qn, saved_index, duration);
      if (json_obj) json_str = compose_json_str(json_obj);
      if (json_str) {
        sleep(1); /* Let's give a small delay to facilitate purge_close being
                     the last message in batch in case of partitioned topics */

        Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str);
        ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str));
        free(json_str);
        json_str = NULL;
      }
    }
  }

  p_kafka_close(&kafkap_kafka_host, FALSE);

  Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n",
      config.name, config.type, writer_pid, qn, saved_index, duration);

  if (config.sql_trigger_exec && !safe_action) P_trigger_exec(config.sql_trigger_exec);

  if (empty_pcust) free(empty_pcust);

  if (json_buf) free(json_buf);

#ifdef WITH_AVRO
  if (avro_buf) free(avro_buf);
#endif
}
int main(void) {
    int pass;

    for (pass = 0; json_schemas[pass]; pass++) {
        int rval = 0;
        size_t len;
        static char buf[4096];
        avro_writer_t writer;
        avro_file_writer_t file_writer;
        avro_file_reader_t file_reader;
        avro_schema_t schema = NULL;
        avro_schema_error_t error = NULL;
        char outpath[64];
        const char *json_schema = json_schemas[pass];

        printf("pass %d with schema %s\n", pass, json_schema);
        check(rval, avro_schema_from_json(json_schema, strlen(json_schema), &schema, &error));

        avro_value_iface_t *iface = avro_generic_class_from_schema(schema);

        avro_value_t val;
        avro_generic_value_new(iface, &val);

        avro_value_t out;
        avro_generic_value_new(iface, &out);

        /* create the val */
        avro_value_reset(&val);
        avro_value_set_string(&val, "test-1691");

        /* Write value to file */
        snprintf(outpath, sizeof(outpath), "test-1691-%d.avro", pass);

        /* create the writers */
        writer = avro_writer_memory(buf, sizeof(buf));
        check(rval, avro_file_writer_create(outpath, schema, &file_writer));

        check(rval, avro_value_write(writer, &val));

        len = avro_writer_tell(writer);
        check(rval, avro_file_writer_append_encoded(file_writer, buf, len));
        check(rval, avro_file_writer_close(file_writer));

        /* Read the value back */
        check(rval, avro_file_reader(outpath, &file_reader));
        check(rval, avro_file_reader_read_value(file_reader, &out));
        if (!avro_value_equal(&val, &out)) {
            fprintf(stderr, "fail!\n");
            exit(EXIT_FAILURE);
        }
        fprintf(stderr, "pass %d: ok: schema %s\n", pass, json_schema);
        check(rval, avro_file_reader_close(file_reader));
        remove(outpath);

        avro_writer_free(writer);
        avro_value_decref(&out);
        avro_value_decref(&val);
        avro_value_iface_decref(iface);
        avro_schema_decref(schema);
    }

    exit(EXIT_SUCCESS);
}
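/* The check() macro used above is defined elsewhere in the test; a plausible
 * (illustrative) definition records the return value and aborts on error: */
#define check(rval, call) \
    do { \
        rval = (call); \
        if (rval) { \
            fprintf(stderr, "Error: %s\n", avro_strerror()); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)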
static void read_with_schema_resolution(const char *filename,
                                        const char *reader_schema_json,
                                        const char *field_name) {
    avro_file_reader_t file;
    avro_schema_error_t error;
    avro_schema_t reader_schema;
    avro_schema_t writer_schema;
    avro_value_iface_t *writer_iface;
    avro_value_iface_t *reader_iface;
    avro_value_t writer_value;
    avro_value_t reader_value;

    // Open an Avro file and grab the writer schema that was used to create the
    // file.
    check_i(avro_file_reader(filename, &file));
    writer_schema = avro_file_reader_get_writer_schema(file);

    // Create a value instance that we want to read the data into.  Note that
    // this is *not* the writer schema!
    check_i(avro_schema_from_json(reader_schema_json, 0, &reader_schema, &error));
    check_p(reader_iface = avro_generic_class_from_schema(reader_schema));
    check_i(avro_generic_value_new(reader_iface, &reader_value));

    // Create a resolved writer that will perform the schema resolution for us.
    // If the two schemas aren't compatible, this function will return an error,
    // and the error text should describe which parts of the schemas are
    // incompatible.
    check_p(writer_iface = avro_resolved_writer_new(writer_schema, reader_schema));

    // Create an instance of the resolved writer, and tell it to wrap our reader
    // value instance.
    check_i(avro_resolved_writer_new_value(writer_iface, &writer_value));
    avro_resolved_writer_set_dest(&writer_value, &reader_value);

    // Now we've got the same basic loop as above.  But we've got two value
    // instances floating around!  Which do we use?  We have the file reader
    // fill in `writer_value`, since that's the value that is an instance of the
    // file's writer schema.  Since it's an instance of a resolved writer,
    // though, it doesn't actually store any data itself.  Instead, it will
    // perform schema resolution on the data read from the file, and fill in its
    // wrapped value (which in our case is `reader_value`).  That means that
    // once the data has been read, we can get its (schema-resolved) contents
    // via `reader_value`.
    while (avro_file_reader_read_value(file, &writer_value) == 0) {
        avro_value_t field;
        int32_t value;

        check_i(avro_value_get_by_name(&reader_value, field_name, &field, NULL));
        check_i(avro_value_get_int(&field, &value));
        printf(" %s: %" PRId32 "\n", field_name, value);
    }

    // Close the file and clean up after ourselves.
    avro_file_reader_close(file);
    avro_value_decref(&writer_value);
    avro_value_iface_decref(writer_iface);
    avro_schema_decref(writer_schema);
    avro_value_decref(&reader_value);
    avro_value_iface_decref(reader_iface);
    avro_schema_decref(reader_schema);
}
static void write_data(const char *filename) {
    avro_file_writer_t file;
    avro_schema_t writer_schema;
    avro_schema_error_t error;
    avro_value_iface_t *writer_iface;
    avro_value_t writer_value;
    avro_value_t field;

    // First parse the JSON schema into the C API's internal schema
    // representation.
    check_i(avro_schema_from_json(WRITER_SCHEMA, 0, &writer_schema, &error));

    // Then create a value that is an instance of that schema.  We use the
    // built-in "generic" value implementation, which is what you'll usually use
    // to create value instances that can actually store data.  We only need to
    // create one instance, since we can re-use it for all of the values that
    // we're going to write into the file.
    check_p(writer_iface = avro_generic_class_from_schema(writer_schema));
    check_i(avro_generic_value_new(writer_iface, &writer_value));

    // Open a new data file for writing, and then write a slew of records into
    // it.
    check_i(avro_file_writer_create(filename, writer_schema, &file));

    /* record 1 */
    check_i(avro_value_get_by_name(&writer_value, "a", &field, NULL));
    check_i(avro_value_set_int(&field, 10));
    check_i(avro_value_get_by_name(&writer_value, "b", &field, NULL));
    check_i(avro_value_set_int(&field, 11));
    check_i(avro_file_writer_append_value(file, &writer_value));

    /* record 2 */
    check_i(avro_value_get_by_name(&writer_value, "a", &field, NULL));
    check_i(avro_value_set_int(&field, 20));
    check_i(avro_value_get_by_name(&writer_value, "b", &field, NULL));
    check_i(avro_value_set_int(&field, 21));
    check_i(avro_file_writer_append_value(file, &writer_value));

    /* record 3 */
    check_i(avro_value_get_by_name(&writer_value, "a", &field, NULL));
    check_i(avro_value_set_int(&field, 30));
    check_i(avro_value_get_by_name(&writer_value, "b", &field, NULL));
    check_i(avro_value_set_int(&field, 31));
    check_i(avro_file_writer_append_value(file, &writer_value));

    /* record 4 */
    check_i(avro_value_get_by_name(&writer_value, "a", &field, NULL));
    check_i(avro_value_set_int(&field, 40));
    check_i(avro_value_get_by_name(&writer_value, "b", &field, NULL));
    check_i(avro_value_set_int(&field, 41));
    check_i(avro_file_writer_append_value(file, &writer_value));

    /* record 5 */
    check_i(avro_value_get_by_name(&writer_value, "a", &field, NULL));
    check_i(avro_value_set_int(&field, 50));
    check_i(avro_value_get_by_name(&writer_value, "b", &field, NULL));
    check_i(avro_value_set_int(&field, 51));
    check_i(avro_file_writer_append_value(file, &writer_value));

    // Close the file and clean up after ourselves.
    avro_file_writer_close(file);
    avro_value_decref(&writer_value);
    avro_value_iface_decref(writer_iface);
    avro_schema_decref(writer_schema);
}
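/* WRITER_SCHEMA is defined elsewhere in the original; given the two int
 * fields "a" and "b" written above, it presumably resembles the following
 * (illustrative, not the original definition): */
#define WRITER_SCHEMA \
    "{\"type\": \"record\", \"name\": \"test\"," \
    " \"fields\": [" \
    "   {\"name\": \"a\", \"type\": \"int\"}," \
    "   {\"name\": \"b\", \"type\": \"int\"}]}"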
static void build_heartbeat_response(completed_proc_t *completed_proc_array, int array_size, avro_slice_t **slice) {
    char filename[FILE_NAME_LEN];
    char buf[BUFFER_SIZE];
    long len = 0;
    avro_schema_t schema;
    avro_value_iface_t *iface;
    avro_value_t record;
    avro_value_t completed_processes_value, ProcessStatus_value;
    avro_value_t name_value, jobid_value, vpid_value;
    avro_value_t state_value;
    avro_value_t exit_value_value;
    size_t index;
    int i;
    avro_writer_t writer;

    snprintf(filename, sizeof(filename), "%s/%s", SCHEMA_PATH, "HeartBeatResponseRecordAvro.avsc");
    init_schema(filename, &schema);

    iface = avro_generic_class_from_schema(schema);
    avro_generic_value_new(iface, &record);

    avro_value_get_by_name(&record, "completed_processes", &completed_processes_value, &index);

    for (i = 0; i < array_size; i++) {
        avro_value_append(&completed_processes_value, &ProcessStatus_value, &index);

        avro_value_get_by_name(&ProcessStatus_value, "name", &name_value, &index);
        avro_value_get_by_name(&name_value, "jobid", &jobid_value, &index);
        avro_value_set_int(&jobid_value, completed_proc_array[i].proc_name.jobid);
        avro_value_get_by_name(&name_value, "vpid", &vpid_value, &index);
        avro_value_set_int(&vpid_value, completed_proc_array[i].proc_name.vpid);

        avro_value_get_by_name(&ProcessStatus_value, "state", &state_value, &index);
        avro_value_set_enum(&state_value, completed_proc_array[i].proc_state);

        avro_value_get_by_name(&ProcessStatus_value, "exit_value", &exit_value_value, &index);
        avro_value_set_int(&exit_value_value, completed_proc_array[i].exit_value);
    }

    /* create a writer backed by a memory buffer */
    writer = avro_writer_memory(buf, sizeof(buf));

    /* write the record into the buffer */
    if (avro_value_write(writer, &record)) {
        fprintf(stderr, "Unable to write record to memory buffer\n");
        fprintf(stderr, "Error: %s\n", avro_strerror());
        exit(1);
    }

    avro_writer_flush(writer);
    len = avro_writer_tell(writer);

    avro_value_decref(&record);  /* was left commented out, leaking the record */
    avro_value_iface_decref(iface);
    avro_schema_decref(schema);
    avro_writer_free(writer);    /* was leaked in the original */

    *slice = xmalloc(sizeof(avro_slice_t));
    (*slice)->buffer = xmalloc(len);
    (*slice)->len = len;
    memcpy((*slice)->buffer, buf, len);
}