/* Decodes an Avro-encoded binary value into a JSON string.
 *
 * schema:      writer schema describing val_bin (required when val_bin != NULL).
 * val_bin:     Avro binary data, or NULL for "no value".
 * val_len:     length of val_bin in bytes.
 * val_out:     receives a malloc'd JSON string (caller frees), or NULL.
 * val_len_out: receives strlen(*val_out); set to 0 when there is no value.
 *
 * Returns 0 on success, EINVAL or an Avro error code on failure.
 * Fix: uses a single cleanup path instead of four copies of the teardown,
 * and sets *val_len_out on the NULL-value early return (was left unset). */
int avro_bin_to_json(avro_schema_t schema, const void *val_bin, size_t val_len,
        char **val_out, size_t *val_len_out) {
    if (!val_bin) {
        *val_out = NULL;
        *val_len_out = 0;
        return 0;
    }
    if (!schema) {
        log_error("json: got a value where we didn't expect one, and no schema to decode it");
        *val_out = NULL;
        return EINVAL;
    }

    int err = EINVAL;
    int have_value = 0;
    avro_value_t value;
    avro_value_iface_t *iface = NULL;
    avro_reader_t reader = avro_reader_memory(val_bin, val_len);

    iface = avro_generic_class_from_schema(schema);
    if (!iface) {
        log_error("json: error in avro_generic_class_from_schema: %s", avro_strerror());
        goto cleanup;
    }

    err = avro_generic_value_new(iface, &value);
    if (err) {
        log_error("json: error in avro_generic_value_new: %s", avro_strerror());
        goto cleanup;
    }
    have_value = 1;

    err = avro_value_read(reader, &value);
    if (err) {
        log_error("json: error decoding Avro value: %s", avro_strerror());
        goto cleanup;
    }

    err = avro_value_to_json(&value, 1, val_out);
    if (err) {
        log_error("json: error converting Avro value to JSON: %s", avro_strerror());
        goto cleanup;
    }

    *val_len_out = strlen(*val_out); // not including null terminator - to librdkafka it's just bytes

cleanup:
    if (have_value) avro_value_decref(&value);
    if (iface) avro_value_iface_decref(iface);
    avro_reader_free(reader);
    return err;
}
/* Round-trips `datum` through the shared in-memory buffer and checks the
 * decoded result equals `expected` (or the input itself when expected is
 * NULL).  Runs twice: once without and once with schema validation.
 * NOTE(review): reader, writer and buf are file-scope globals. */
void write_read_check(avro_schema_t writers_schema, avro_datum_t datum,
		      avro_schema_t readers_schema, avro_datum_t expected,
		      char *type)
{
	avro_datum_t decoded;
	int validate;

	/* Default: expect to read back exactly what was written. */
	if (!expected) {
		expected = datum;
	}

	for (validate = 0; validate <= 1; validate++) {
		reader = avro_reader_memory(buf, sizeof(buf));
		writer = avro_writer_memory(buf, sizeof(buf));

		/* Validating read/write */
		if (avro_write_data(writer,
				    validate ? writers_schema : NULL, datum)) {
			fprintf(stderr, "Unable to write %s validate=%d\n %s\n",
				type, validate, avro_strerror());
			exit(EXIT_FAILURE);
		}

		int64_t size = avro_size_data(writer,
					      validate ? writers_schema : NULL,
					      datum);
		if (size != avro_writer_tell(writer)) {
			fprintf(stderr,
				"Unable to calculate size %s validate=%d "
				"(%"PRId64" != %"PRId64")\n %s\n",
				type, validate, size, avro_writer_tell(writer),
				avro_strerror());
			exit(EXIT_FAILURE);
		}

		if (avro_read_data(reader, writers_schema, readers_schema,
				   &decoded)) {
			fprintf(stderr, "Unable to read %s validate=%d\n %s\n",
				type, validate, avro_strerror());
			fprintf(stderr, " %s\n", avro_strerror());
			exit(EXIT_FAILURE);
		}

		if (!avro_datum_equal(expected, decoded)) {
			fprintf(stderr,
				"Unable to encode/decode %s validate=%d\n %s\n",
				type, validate, avro_strerror());
			exit(EXIT_FAILURE);
		}

		avro_reader_dump(reader, stderr);
		avro_datum_decref(decoded);
		avro_reader_free(reader);
		avro_writer_free(writer);
	}
}
static void *worker_thread( void *context )
#endif
{
	/* Worker thread body for the error-stack thread-safety test: sets a
	   thread-specific error, reads back the error stack and validates it
	   contains the saved error; later it prepends a second error and
	   validates the stack now contains both, in order. Reports failures
	   through the shared TEST_THREAD_DATA structure. */
	TEST_THREAD_DATA *thread_context = (TEST_THREAD_DATA *)context;

	char first_error_buffer[1024] = "";
	char second_error_buffer[1024] = "";
	char full_error_buffer[1024] = "";
	const char *error_stack = NULL;

	int index = thread_context->index;
	unsigned sleep_interval_millis = thread_context->sleep_interval_millis;

	// Set a thread-specific error; sleeping between steps interleaves the
	// threads so cross-thread leakage of error state would be detected.
	snprintf( first_error_buffer, sizeof(first_error_buffer), "thread %d set an error", index );
	avro_set_error( "%s", first_error_buffer );
	SLEEP_MILLIS( sleep_interval_millis );

	// Validate the error stack contains the thread-specific error.
	error_stack = avro_strerror();
	if ( strcmp( error_stack, first_error_buffer ) != 0 )
	{
		thread_context->error_occured = 1;
		snprintf( thread_context->error_message,
			  sizeof(thread_context->error_message),
			  "invalid error stack found: expected '%s' found '%s'",
			  first_error_buffer, error_stack );
	}

	// Prepend another thread-specific error.
	SLEEP_MILLIS( sleep_interval_millis );
	snprintf( second_error_buffer, sizeof(second_error_buffer), "thread %d set ANOTHER error...", index );
	avro_prefix_error( "%s", second_error_buffer );
	// avro_prefix_error puts the new text in front of the existing stack.
	snprintf( full_error_buffer, sizeof(full_error_buffer), "%s%s", second_error_buffer, first_error_buffer );

	// Validate the error stack contains the 2 errors as expected.
	SLEEP_MILLIS( sleep_interval_millis );
	error_stack = avro_strerror();
	if ( strcmp( error_stack, full_error_buffer ) != 0 )
	{
		thread_context->error_occured = 1;
		snprintf( thread_context->error_message,
			  sizeof(thread_context->error_message),
			  "invalid error stack found: expected '%s' found '%s'",
			  full_error_buffer, error_stack );
	}

	return 0;
}
/* Logical-decoding callback for a single row-level change (insert, update or
 * delete). Converts the affected tuple(s) into an Avro frame via the schema
 * cache and writes the frame to the output stream. Errors are reported with
 * elog(ERROR), which performs a non-local exit back into PostgreSQL. */
static void output_avro_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation rel,
        ReorderBufferChange *change) {
    int err = 0;
    HeapTuple oldtuple = NULL, newtuple = NULL;
    plugin_state *state = ctx->output_plugin_private;

    /* Do all per-change allocation in the plugin's private memory context so
     * it can be bulk-released with one MemoryContextReset below. */
    MemoryContext oldctx = MemoryContextSwitchTo(state->memctx);
    reset_frame(state);

    switch (change->action) {
        case REORDER_BUFFER_CHANGE_INSERT:
            /* Inserts must carry a new tuple. */
            if (!change->data.tp.newtuple) {
                elog(ERROR, "output_avro_change: insert action without a tuple");
            }
            newtuple = &change->data.tp.newtuple->tuple;
            err = update_frame_with_insert(&state->frame_value, state->schema_cache, rel,
                    RelationGetDescr(rel), newtuple);
            break;
        case REORDER_BUFFER_CHANGE_UPDATE:
            if (!change->data.tp.newtuple) {
                elog(ERROR, "output_avro_change: update action without a tuple");
            }
            /* The old tuple is only present when the replica identity captures
             * it (e.g. REPLICA IDENTITY FULL); otherwise stays NULL. */
            if (change->data.tp.oldtuple) {
                oldtuple = &change->data.tp.oldtuple->tuple;
            }
            newtuple = &change->data.tp.newtuple->tuple;
            err = update_frame_with_update(&state->frame_value, state->schema_cache, rel,
                    oldtuple, newtuple);
            break;
        case REORDER_BUFFER_CHANGE_DELETE:
            /* Deletes may have no old tuple at all, depending on replica identity. */
            if (change->data.tp.oldtuple) {
                oldtuple = &change->data.tp.oldtuple->tuple;
            }
            err = update_frame_with_delete(&state->frame_value, state->schema_cache, rel, oldtuple);
            break;
        default:
            elog(ERROR, "output_avro_change: unknown change action %d", change->action);
    }

    if (err) {
        elog(ERROR, "output_avro_change: row conversion failed: %s", avro_strerror());
    }
    if (write_frame(ctx, state)) {
        elog(ERROR, "output_avro_change: writing Avro binary failed: %s", avro_strerror());
    }

    MemoryContextSwitchTo(oldctx);
    MemoryContextReset(state->memctx);
}
static void process_file(const char *in_filename, const char *out_filename) { avro_file_reader_t reader; avro_file_writer_t writer; if (in_filename == NULL) { if (avro_file_reader_fp(stdin, "<stdin>", 0, &reader)) { fprintf(stderr, "Error opening <stdin>:\n %s\n", avro_strerror()); exit(1); } } else { if (avro_file_reader(in_filename, &reader)) { fprintf(stderr, "Error opening %s:\n %s\n", in_filename, avro_strerror()); exit(1); } } avro_schema_t wschema; avro_value_iface_t *iface; avro_value_t value; wschema = avro_file_reader_get_writer_schema(reader); iface = avro_generic_class_from_schema(wschema); avro_generic_value_new(iface, &value); if (avro_file_writer_create_with_codec (out_filename, wschema, &writer, codec, block_size)) { fprintf(stderr, "Error creating %s:\n %s\n", out_filename, avro_strerror()); exit(1); } while (avro_file_reader_read_value(reader, &value) == 0) { if (avro_file_writer_append_value(writer, &value)) { fprintf(stderr, "Error writing to %s:\n %s\n", out_filename, avro_strerror()); exit(1); } avro_value_reset(&value); } avro_file_reader_close(reader); avro_file_writer_close(writer); avro_value_decref(&value); avro_value_iface_decref(iface); avro_schema_decref(wschema); }
/* Logical-decoding callback fired at the start of a transaction: encodes a
 * "begin" frame and writes it out. Errors abort via elog(ERROR). */
static void output_avro_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) {
    plugin_state *state = ctx->output_plugin_private;

    /* Work inside the plugin's private context so it can be reset wholesale. */
    MemoryContext prev_ctx = MemoryContextSwitchTo(state->memctx);
    reset_frame(state);

    if (update_frame_with_begin_txn(&state->frame_value, txn) != 0) {
        elog(ERROR, "output_avro_begin_txn: Avro conversion failed: %s", avro_strerror());
    }

    if (write_frame(ctx, state) != 0) {
        elog(ERROR, "output_avro_begin_txn: writing Avro binary failed: %s", avro_strerror());
    }

    MemoryContextSwitchTo(prev_ctx);
    MemoryContextReset(state->memctx);
}
/* Lua binding: schema:new_raw_value([value]).  Creates a new generic Avro
 * value for the schema at stack slot 1.  With a second argument, the new
 * value is stored into that existing LuaAvroValue (releasing whatever it
 * held); otherwise a fresh value userdata is pushed.  Returns 1 result. */
static int l_schema_new_raw_value(lua_State *L)
{
    LuaAvroSchema *l_schema = luaL_checkudata(L, 1, MT_AVRO_SCHEMA);

    /* Lazily construct the generic value implementation for this schema. */
    if (l_schema->iface == NULL) {
        l_schema->iface = avro_generic_class_from_schema(l_schema->schema);
        if (l_schema->iface == NULL) {
            lua_pushstring(L, avro_strerror());
            return lua_error(L);
        }
    }

    if (lua_gettop(L) < 2) {
        /* No target supplied: build a fresh value and push it. */
        avro_value_t fresh;
        check(avro_generic_value_new(l_schema->iface, &fresh));
        lua_avro_push_value(L, &fresh, true);
        return 1;
    }

    /* Reuse the caller-supplied value slot, dropping its old contents. */
    LuaAvroValue *slot = luaL_checkudata(L, 2, MT_AVRO_VALUE);
    if (slot->should_decref && slot->value.self != NULL) {
        avro_value_decref(&slot->value);
    }
    check(avro_generic_value_new(l_schema->iface, &slot->value));
    slot->should_decref = true;
    lua_pushvalue(L, 2);
    return 1;
}
/* Python entry point: parses an Avro schema (JSON string/bytes) and returns a
 * freshly-populated AvroTypes object describing the named types it declares.
 * Returns NULL with a Python exception set on failure.
 * Fix: the parsed avro_schema_t was leaked on both the success and the
 * types-allocation-failure paths; it is now decref'd. */
static PyObject *
create_types_func(PyObject *self, PyObject *args)
{
    int rval;
    avro_schema_t schema;
    PyObject *schema_json;
    ConvertInfo info;
    PyObject *schema_json_bytes;

    if (!PyArg_ParseTuple(args, "O", &schema_json)) {
        return NULL;
    }
    schema_json_bytes = pystring_to_pybytes(schema_json);
    rval = avro_schema_from_json(pybytes_to_chars(schema_json_bytes),
                                 0, &schema, NULL);
    Py_DECREF(schema_json_bytes);
    if (rval != 0 || schema == NULL) {
        PyErr_Format(PyExc_IOError, "Error reading schema: %s", avro_strerror());
        return NULL;
    }

    info.types = PyObject_CallFunctionObjArgs((PyObject *)get_avro_types_type(), NULL);
    if (info.types == NULL) {
        /* XXX: is the exception already set? */
        avro_schema_decref(schema);
        return NULL;
    }
    declare_types(&info, schema);

    /* Release our reference to the parsed schema; declare_types keeps what it
     * needs via the types object. */
    avro_schema_decref(schema);
    return info.types;
}
/*
{
	"type": "record",
	"name": "FinishRequest",
	"fields": [
		{"name": "succeed", "type": "boolean"},
		{"name": "diagnostics", "type": "string"}
	]
}
*/
/* Decodes a FinishRequest record from `slice` into *succeed and *diag.
 * *diag points into the decoded record's storage (see NOTE below).
 * Exits the process if the record cannot be decoded.
 * Fixes: the memory reader was leaked; sprintf replaced with snprintf. */
static void parse_finish_request(avro_slice_t *slice, bool *succeed, char **diag)
{
	char filename[FILE_NAME_LEN];
	avro_schema_t schema;
	avro_value_iface_t *iface;
	avro_value_t record, succeed_value, diag_value;
	size_t index;
	avro_reader_t reader;
	size_t size;

	snprintf(filename, sizeof(filename), "%s/%s", SCHEMA_PATH, "FinishRequestRecordAvro.avsc");
	init_schema(filename, &schema);

	iface = avro_generic_class_from_schema(schema);
	avro_generic_value_new(iface, &record);
	reader = avro_reader_memory(slice->buffer, slice->len);

	if (avro_value_read(reader, &record)) {
		fprintf(stderr, "Unable to read record from memory buffer\n");
		fprintf(stderr, "Error: %s\n", avro_strerror());
		exit(1);
	}

	avro_value_get_by_name(&record, "succeed", &succeed_value, &index);
	avro_value_get_boolean(&succeed_value, succeed);
	avro_value_get_by_name(&record, "diagnostics", &diag_value, &index);
	avro_value_get_string(&diag_value, diag, &size);

	/* The reader only wraps slice->buffer; freeing it is safe. */
	avro_reader_free(reader);

	/* NOTE(review): `record` is intentionally not released here — *diag
	 * appears to point into the record's string storage, so decref'ing it
	 * would hand the caller a dangling pointer. This leaks the record; a
	 * proper fix would copy the string out first. TODO confirm. */
	avro_value_iface_decref(iface);
	avro_schema_decref(schema);
}
static int write_data(int n_records) { int i; avro_schema_t schema; avro_schema_error_t error; avro_file_writer_t writer; avro_value_iface_t *iface; avro_value_t value; fprintf(stderr, "\nWriting...\n"); if (avro_schema_from_json(PERSON_SCHEMA, 0, &schema, &error)) { fprintf(stderr, "Unable to parse schema\n"); return -1; } if (avro_file_writer_create(filename, schema, &writer)) { fprintf(stderr, "There was an error creating file: %s\n", avro_strerror()); return -1; } iface = avro_generic_class_from_schema(schema); avro_generic_value_new(iface, &value); avro_value_t field; avro_value_get_by_index(&value, 0, &field, NULL); avro_value_set_int(&field, 123); for (i = 0; i < n_records; i++) { if (avro_file_writer_append_value(writer, &value)) { fprintf(stderr, "There was an error writing file: %s\n", avro_strerror()); return -1; } } if (avro_file_writer_close(writer)) { fprintf(stderr, "There was an error creating file: %s\n", avro_strerror()); return -1; } avro_value_decref(&value); avro_value_iface_decref(iface); avro_schema_decref(schema); return n_records; }
static int read_data() { int rval; int records_read = 0; avro_file_reader_t reader; avro_value_iface_t *iface; avro_value_t value; fprintf(stderr, "\nReading...\n"); rval = avro_file_reader(filename, &reader); if (rval) { fprintf(stderr, "Error: %s\n", avro_strerror()); return -1; } avro_schema_t schema = avro_file_reader_get_writer_schema(reader); iface = avro_generic_class_from_schema(schema); avro_generic_value_new(iface, &value); while ((rval = avro_file_reader_read_value(reader, &value)) == 0) { avro_value_t field; int32_t val; avro_value_get_by_index(&value, 0, &field, NULL); avro_value_get_int(&field, &val); fprintf(stderr, "value = %d\n", val); records_read++; avro_value_reset(&value); } avro_value_decref(&value); avro_value_iface_decref(iface); avro_schema_decref(schema); avro_file_reader_close(reader); fprintf(stderr, "read %d records.\n", records_read); if (rval != EOF) { fprintf(stderr, "Error: %s\n", avro_strerror()); return -1; } return records_read; }
/*
typedef struct {
	int jobid;
	int vpid;
} process_name_t;

typedef struct {
	char *en_vars;
	char *args;
	char *host_name;
	process_name_t proc_name;
} launch_context_t;

typedef struct {
	bool is_successful;
	process_name_t proc_name;
} launch_response_t;
*/
/* Serializes `array_size` launch results into an Avro LaunchResponse record
 * and returns the encoded bytes in a newly allocated *slice (caller owns the
 * slice and its buffer). Exits the process on encoding failure.
 * Fixes: the memory writer and the generic record were leaked; sprintf
 * replaced with snprintf. */
static void build_launch_response(launch_response_t *launch_response_array, int array_size, avro_slice_t **slice)
{
	char filename[FILE_NAME_LEN];
	char buf[BUFFER_SIZE];
	long len = 0;
	avro_schema_t schema;
	avro_value_iface_t *iface;
	avro_value_t record;
	avro_value_t results_value, LaunchResult_value, is_successful_value, name_value, jobid_value, vpid_value;
	size_t index;
	int i;
	avro_writer_t writer;

	snprintf(filename, sizeof(filename), "%s/%s", SCHEMA_PATH, "LaunchResponseRecordAvro.avsc");
	init_schema(filename, &schema);

	iface = avro_generic_class_from_schema(schema);
	avro_generic_value_new(iface, &record);

	avro_value_get_by_name(&record, "results", &results_value, &index);
	for (i = 0; i < array_size; i++) {
		avro_value_append(&results_value, &LaunchResult_value, &index);
		avro_value_get_by_name(&LaunchResult_value, "is_successful", &is_successful_value, &index);
		avro_value_set_boolean(&is_successful_value, launch_response_array[i].is_successful);
		avro_value_get_by_name(&LaunchResult_value, "name", &name_value, &index);
		avro_value_get_by_name(&name_value, "jobid", &jobid_value, &index);
		avro_value_set_int(&jobid_value, launch_response_array[i].proc_name.jobid);
		avro_value_get_by_name(&name_value, "vpid", &vpid_value, &index);
		avro_value_set_int(&vpid_value, launch_response_array[i].proc_name.vpid);
	}

	/* create a writer with memory buffer */
	writer = avro_writer_memory(buf, sizeof(buf));
	/* write record to writer (buffer) */
	if (avro_value_write(writer, &record)) {
		fprintf(stderr, "Unable to write record to memory buffer\n");
		fprintf(stderr, "Error: %s\n", avro_strerror());
		exit(1);
	}
	avro_writer_flush(writer);
	len = avro_writer_tell(writer);

	/* Release the record, iface, schema and writer (record and writer were
	 * previously leaked). buf is a plain stack buffer, unaffected. */
	avro_value_decref(&record);
	avro_value_iface_decref(iface);
	avro_schema_decref(schema);
	avro_writer_free(writer);

	/* Hand the encoded bytes to the caller in a fresh slice. */
	*slice = xmalloc(sizeof(avro_slice_t));
	(*slice)->buffer = xmalloc(len);
	(*slice)->len = len;
	memcpy((*slice)->buffer, buf, len);
}
/* __init__ for the AvroDeserializer Python type.
 * Signature: AvroDeserializer(schema, types=None).
 * Parses the schema JSON, builds the generic value interface and an empty
 * memory reader, and optionally sets up a types registry. Partial
 * initialization is tracked via self->flags so dealloc can tear down only
 * what was actually created. Returns 0 on success, -1 with a Python
 * exception set on failure. */
static int
AvroDeserializer_init(AvroDeserializer *self, PyObject *args, PyObject *kwds)
{
    int rval;
    PyObject *types = NULL;
    const char *schema_json;
    static char *kwlist[] = {"schema", "types", NULL};

    /* Start from a clean state so dealloc is safe after any early return. */
    self->flags = 0;
    self->iface = NULL;

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|O", kwlist,
                                     &schema_json, &types)) {
        return -1;
    }

    rval = avro_schema_from_json(schema_json, 0, &self->schema, NULL);
    if (rval != 0 || self->schema == NULL) {
        PyErr_Format(PyExc_IOError, "Error reading schema: %s", avro_strerror());
        return -1;
    }
    self->flags |= DESERIALIZER_SCHEMA_OK;

    self->iface = avro_generic_class_from_schema(self->schema);
    if (self->iface == NULL) {
        PyErr_SetString(PyExc_IOError, "Error creating generic class interface");
        return -1;
    }

    /* An empty memory reader; callers point it at real buffers later. */
    self->datum_reader = avro_reader_memory(0, 0);
    if (!self->datum_reader) {
        PyErr_NoMemory();
        return -1;
    }
    self->flags |= DESERIALIZER_READER_OK;

    /* copied verbatim from filereader */
    if (types != NULL && PyObject_IsTrue(types)) {
        /* we still haven't incref'ed types here */
        if (Py_TYPE(types) == get_avro_types_type()) {
            /* Caller supplied a ready-made types object: share it. */
            Py_INCREF(types);
            self->info.types = types;
        } else {
            /* Any other truthy value: build a fresh types registry. */
            self->info.types = PyObject_CallFunctionObjArgs(
                (PyObject *) get_avro_types_type(), NULL);
            if (self->info.types == NULL) {
                return -1;
            }
            declare_types(&self->info, self->schema);
        }
    } else {
        self->info.types = NULL;
    }
    return 0;
}
static void process_file(const char *filename) { avro_file_reader_t reader; if (filename == NULL) { if (avro_file_reader_fp(stdin, "<stdin>", 0, &reader)) { fprintf(stderr, "Error opening <stdin>:\n %s\n", avro_strerror()); exit(1); } } else { if (avro_file_reader(filename, &reader)) { fprintf(stderr, "Error opening %s:\n %s\n", filename, avro_strerror()); exit(1); } } avro_schema_t wschema; avro_value_iface_t *iface; avro_value_t value; wschema = avro_file_reader_get_writer_schema(reader); iface = avro_generic_class_from_schema(wschema); avro_generic_value_new(iface, &value); while (avro_file_reader_read_value(reader, &value) == 0) { char *json; if (avro_value_to_json(&value, 1, &json)) { fprintf(stderr, "Error converting value to JSON: %s\n", avro_strerror()); } else { printf("%s\n", json); free(json); } avro_value_reset(&value); } avro_file_reader_close(reader); avro_value_decref(&value); avro_value_iface_decref(iface); }
static int read_data_datum() { int rval; int records_read = 0; avro_file_reader_t reader; avro_datum_t datum; fprintf(stderr, "\nReading...\n"); rval = avro_file_reader(filename, &reader); if (rval) { fprintf(stderr, "Error using 'datum': %s\n", avro_strerror()); return -1; } avro_schema_t schema = avro_file_reader_get_writer_schema(reader); while ((rval = avro_file_reader_read(reader, schema, &datum)) == 0) { avro_datum_t val_datum; int32_t val; if (avro_record_get(datum, "ab", &val_datum)) { fprintf(stderr, "Error getting value: %s\n", avro_strerror()); return -1; } avro_int32_get(val_datum, &val); fprintf(stderr, "value = %d\n", val); records_read++; avro_datum_decref(datum); } avro_schema_decref(schema); avro_file_reader_close(reader); fprintf(stderr, "read %d records using 'datum'.\n", records_read); if (rval != EOF) { fprintf(stderr, "Error using 'datum': %s\n", avro_strerror()); return -1; } return records_read; }
/* Exercises the legacy map datum API: build a {name -> long} map, check its
 * size, keyed/indexed access, round-trip encoding and JSON rendering.
 * Fix: the size check called avro_array_size() on a map datum; it now uses
 * avro_map_size(), the map counterpart. */
static int test_map(void)
{
	avro_schema_t schema = avro_schema_map(avro_schema_long());
	avro_datum_t datum = avro_map(schema);
	int64_t i = 0;
	char *nums[] =
	    { "zero", "one", "two", "three", "four", "five", "six", NULL };
	while (nums[i]) {
		avro_datum_t i_datum = avro_int64(i);
		avro_map_set(datum, nums[i], i_datum);
		avro_datum_decref(i_datum);
		i++;
	}

	if (avro_map_size(datum) != 7) {
		fprintf(stderr, "Unexpected map size\n");
		exit(EXIT_FAILURE);
	}

	/* Look up the value stored at index 2 via its key. */
	avro_datum_t value;
	const char *key;
	avro_map_get_key(datum, 2, &key);
	avro_map_get(datum, key, &value);
	int64_t val;
	avro_int64_get(value, &val);
	if (val != 2) {
		fprintf(stderr, "Unexpected map value 2\n");
		exit(EXIT_FAILURE);
	}

	int index;
	if (avro_map_get_index(datum, "two", &index)) {
		fprintf(stderr, "Can't get index for key \"two\": %s\n", avro_strerror());
		exit(EXIT_FAILURE);
	}
	if (index != 2) {
		fprintf(stderr, "Unexpected index for key \"two\"\n");
		exit(EXIT_FAILURE);
	}
	/* A missing key must report an error. */
	if (!avro_map_get_index(datum, "foobar", &index)) {
		fprintf(stderr, "Unexpected index for key \"foobar\"\n");
		exit(EXIT_FAILURE);
	}

	write_read_check(schema, datum, NULL, NULL, "map");
	test_json(datum,
		  "{\"zero\": 0, \"one\": 1, \"two\": 2, \"three\": 3, "
		  "\"four\": 4, \"five\": 5, \"six\": 6}");

	avro_datum_decref(datum);
	avro_schema_decref(schema);
	return 0;
}
/* Registers (when a schema registry is configured) and parses the key or row
 * schema for `table`, updating the table metadata.
 * Returns 0 on success. On failure, sets mapper->error and returns nonzero. */
int table_metadata_update_schema(table_mapper_t mapper, table_metadata_t table,
        int is_key, const char* schema_json, size_t schema_len) {
    int prev_schema_id = is_key ? table->key_schema_id : table->row_schema_id;
    int schema_id = TABLE_MAPPER_SCHEMA_ID_MISSING;

    int err;

    if (mapper->registry) {
        /* Submit the schema to the registry, which assigns (or re-uses) an id. */
        err = schema_registry_request(mapper->registry, rd_kafka_topic_name(table->topic),
                is_key, schema_json, schema_len,
                &schema_id);
        if (err) {
            mapper_error(mapper, "Failed to register %s schema: %s",
                    is_key ? "key" : "row",
                    mapper->registry->error);
            return err;
        }

        table_metadata_set_schema_id(table, is_key, schema_id);
    }

    avro_schema_t schema;

    /* If running with a schema registry, we can use the registry to detect
     * if the schema we just saw is the same as the one we remembered
     * previously (since the registry guarantees to return the same id for
     * identical schemas). If the registry returns the same id as before, we
     * can skip parsing the new schema and just keep the previous one.
     *
     * However, if we're running without a registry, it's not so easy to detect
     * whether or not the schema changed, so in that case we just always parse
     * the new schema. (We could store the previous schema JSON and strcmp()
     * it with the new JSON, but that probably wouldn't save much over just
     * parsing the JSON, given this isn't a hot code path.)
     *
     * Note: without a registry both ids stay TABLE_MAPPER_SCHEMA_ID_MISSING,
     * so the first condition below always triggers the re-parse. */
    if (prev_schema_id == TABLE_MAPPER_SCHEMA_ID_MISSING || prev_schema_id != schema_id) {
        if (schema_json) {
            err = avro_schema_from_json_length(schema_json, schema_len, &schema);
            if (err) {
                mapper_error(mapper, "Could not parse %s schema: %s",
                        is_key ? "key" : "row",
                        avro_strerror());
                return err;
            }
        } else {
            /* No schema supplied: clear the stored schema. */
            schema = NULL;
        }

        /* table_metadata_set_schema takes its own reference, so drop ours. */
        table_metadata_set_schema(table, is_key, schema);
        if (schema) avro_schema_decref(schema);
    }

    return 0;
}
/* Python entry point: validate(datum, schema_json) -> int.
 * Parses the schema and reports whether `datum` validates against it.
 * Returns NULL with a Python exception set on parse failure.
 * Fix: the parsed avro_schema_t was leaked; it is now decref'd before
 * building the return value. */
static PyObject *
validate_func(PyObject *self, PyObject *args)
{
    int rval;
    PyObject *datum;
    char *schema_json;
    avro_schema_t schema;
    int is_valid;

    if (!PyArg_ParseTuple(args, "Os", &datum, &schema_json)) {
        return NULL;
    }
    rval = avro_schema_from_json(schema_json, 0, &schema, NULL);
    if (rval != 0 || schema == NULL) {
        PyErr_Format(PyExc_IOError, "Error reading schema: %s", avro_strerror());
        return NULL;
    }
    is_valid = validate(datum, schema);
    avro_schema_decref(schema);
    return Py_BuildValue("i", is_valid);
}
/* Decodes a HeartBeatRequest record from `slice` and inspects its size.
 * Exits the process if the record cannot be decoded.
 * Fixes: the memory reader and the generic record were leaked; the unused
 * `index` local was removed; sprintf replaced with snprintf. */
extern int parse_heartbeat_request(avro_slice_t *slice)
{
	char filename[FILE_NAME_LEN];
	avro_schema_t schema;
	avro_value_iface_t *iface;
	avro_value_t record;
	avro_reader_t reader;
	size_t size = 0;

	snprintf(filename, sizeof(filename), "%s/%s", SCHEMA_PATH, "HeartBeatRequestRecordAvro.avsc");
	init_schema(filename, &schema);

	iface = avro_generic_class_from_schema(schema);
	avro_generic_value_new(iface, &record);
	reader = avro_reader_memory(slice->buffer, slice->len);

	if (avro_value_read(reader, &record)) {
		fprintf(stderr, "Unable to read record from memory buffer\n");
		fprintf(stderr, "Error: %s\n", avro_strerror());
		exit(1);
	}

	avro_value_get_size(&record, &size);

	avro_reader_free(reader);
	avro_value_decref(&record);
	avro_value_iface_decref(iface);
	avro_schema_decref(schema);

	/* NOTE(review): the commented-out original code returned 0 when
	 * size > 0; the live code below returns 0 only when size == 0, which
	 * looks inverted relative to that intent. Behavior kept as-is — confirm
	 * against callers before changing. */
	if (size == 0) {
		return 0;
	} else {
		return -1;
	}
}
/* Processes one tuple of the snapshot query result set: validates the shape
 * of the libpq result (exactly one non-null binary column) and feeds the
 * frame bytes to the frame reader. Returns 0 on success, EIO on a malformed
 * response, or the frame parser's error code. */
int snapshot_tuple(client_context_t context, PGresult *res, int row_number) {
    int nfields = PQnfields(res);
    if (nfields != 1) {
        client_error(context, "Unexpected response with %d fields", nfields);
        return EIO;
    }
    if (PQgetisnull(res, row_number, 0)) {
        client_error(context, "Unexpected null response value");
        return EIO;
    }

    int format = PQfformat(res, 0);
    if (format != 1) { /* format 1 == binary */
        client_error(context, "Unexpected response format: %d", format);
        return EIO;
    }

    /* wal_pos == 0 == InvalidXLogRecPtr */
    int err = parse_frame(context->repl.frame_reader, 0,
                          PQgetvalue(res, row_number, 0),
                          PQgetlength(res, row_number, 0));
    if (err) {
        client_error(context, "Error parsing frame data: %s", avro_strerror());
    }
    return err;
}
/* __init__ for the AvroFileReader Python type.
 * Signature: AvroFileReader(file, types=None).
 * Opens an Avro container file from a Python file object, captures the writer
 * schema (both as an avro_schema_t and as a JSON string), builds the generic
 * value interface, and optionally sets up a types registry. Partial
 * initialization is tracked in self->flags so dealloc can tear down only what
 * was created. Returns 0 on success, -1 with a Python exception set. */
static int
AvroFileReader_init(AvroFileReader *self, PyObject *args, PyObject *kwds)
{
    int rval;
    PyObject *pyfile;
    PyObject *types = NULL;
    FILE *file;
    char *schema_json;
    avro_writer_t schema_json_writer;
    size_t len;
    static char *kwlist[] = {"file", "types", NULL};

    /* Start from a clean state so dealloc is safe after any early return. */
    self->pyfile = NULL;
    self->flags = 0;
    self->iface = NULL;

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwlist,
                                     &pyfile, &types)) {
        return -1;
    }

    /* NOTE(review): PyFile_AsFile is Python 2 API. */
    file = PyFile_AsFile(pyfile);
    if (file == NULL) {
        return -1;
    }
    /* Keep the Python file object alive as long as we hold its FILE*. */
    self->pyfile = pyfile;
    Py_INCREF(pyfile);

    if (avro_file_reader_fp(file, "pyfile", 0, &self->reader)) {
        PyErr_Format(PyExc_IOError, "Error opening file: %s", avro_strerror());
        return -1;
    }
    self->flags |= AVROFILE_READER_OK;

    self->schema = avro_file_reader_get_writer_schema(self->reader);
    if (self->schema == NULL) {
        PyErr_Format(PyExc_IOError, "Error reading schema: %s", avro_strerror());
        return -1;
    }

    /* Render the schema to JSON, doubling the buffer until it fits. */
    len = 256;
    do {
        /* XXX horrible loop to get a big enough buffer for schema. */
        len *= 2;
        schema_json = (char *)PyMem_Malloc(len);
        schema_json_writer = avro_writer_memory(schema_json, len);
        rval = avro_schema_to_json(self->schema, schema_json_writer);
        if (!rval) {
            rval = avro_write(schema_json_writer, (void *)"", 1); /* zero terminate */
            if (!rval) {
                self->schema_json = PyString_FromString(schema_json);
            }
        }
        avro_writer_free(schema_json_writer);
        PyMem_Free(schema_json);
    } while (rval == ENOSPC);
    if (rval) {
        PyErr_Format(PyExc_IOError, "Error saving schema: %s", avro_strerror());
        return -1;
    }
    self->flags |= AVROFILE_SCHEMA_OK;

    self->iface = avro_generic_class_from_schema(self->schema);
    if (self->iface == NULL) {
        PyErr_SetString(PyExc_IOError, "Error creating generic class interface");
        return -1;
    }

    if (types != NULL && PyObject_IsTrue(types)) {
        /* we still haven't incref'ed types here */
        if (Py_TYPE(types) == get_avro_types_type()) {
            /* Caller supplied a ready-made types object: share it. */
            Py_INCREF(types);
            self->info.types = types;
        } else {
            /* Any other truthy value: build a fresh types registry. */
            self->info.types = PyObject_CallFunctionObjArgs((PyObject *)get_avro_types_type(), NULL);
            if (self->info.types == NULL)
            {
                return -1;
            }
            declare_types(&self->info, self->schema);
        }
    } else {
        self->info.types = NULL;
    }
    return 0;
}
/* Serializes `array_size` completed-process statuses into an Avro
 * HeartBeatResponse record and returns the encoded bytes in a newly
 * allocated *slice (caller owns the slice and its buffer). Exits the process
 * on encoding failure.
 * Fixes: the memory writer and the generic record were leaked; sprintf
 * replaced with snprintf. */
static void build_heartbeat_response(completed_proc_t *completed_proc_array,
		int array_size, avro_slice_t **slice)
{
	char filename[FILE_NAME_LEN];
	char buf[BUFFER_SIZE];
	long len = 0;
	avro_schema_t schema;
	avro_value_iface_t *iface;
	avro_value_t record;
	avro_value_t completed_processes_value, ProcessStatus_value;
	avro_value_t name_value, ProcessName_value, jobid_value, vpid_value;
	avro_value_t state_value;
	avro_value_t exit_value_value;
	size_t index;
	int i;
	avro_writer_t writer;

	snprintf(filename, sizeof(filename), "%s/%s", SCHEMA_PATH, "HeartBeatResponseRecordAvro.avsc");
	init_schema(filename, &schema);

	iface = avro_generic_class_from_schema(schema);
	avro_generic_value_new(iface, &record);

	avro_value_get_by_name(&record, "completed_processes", &completed_processes_value, &index);
	for (i = 0; i < array_size; i++) {
		avro_value_append(&completed_processes_value, &ProcessStatus_value, &index);
		avro_value_get_by_name(&ProcessStatus_value, "name", &name_value, &index);
		avro_value_get_by_name(&name_value, "jobid", &jobid_value, &index);
		avro_value_set_int(&jobid_value, completed_proc_array[i].proc_name.jobid);
		avro_value_get_by_name(&name_value, "vpid", &vpid_value, &index);
		avro_value_set_int(&vpid_value, completed_proc_array[i].proc_name.vpid);
		avro_value_get_by_name(&ProcessStatus_value, "state", &state_value, &index);
		avro_value_set_enum(&state_value, completed_proc_array[i].proc_state);
		avro_value_get_by_name(&ProcessStatus_value, "exit_value", &exit_value_value, &index);
		avro_value_set_int(&exit_value_value, completed_proc_array[i].exit_value);
	}

	/* create a writer with memory buffer */
	writer = avro_writer_memory(buf, sizeof(buf));
	/* write record to writer (buffer) */
	if (avro_value_write(writer, &record)) {
		fprintf(stderr, "Unable to write record to memory buffer\n");
		fprintf(stderr, "Error: %s\n", avro_strerror());
		exit(1);
	}
	avro_writer_flush(writer);
	len = avro_writer_tell(writer);

	/* Release the record, iface, schema and writer (record and writer were
	 * previously leaked). buf is a plain stack buffer, unaffected. */
	avro_value_decref(&record);
	avro_value_iface_decref(iface);
	avro_schema_decref(schema);
	avro_writer_free(writer);

	/* Hand the encoded bytes to the caller in a fresh slice. */
	*slice = xmalloc(sizeof(avro_slice_t));
	(*slice)->buffer = xmalloc(len);
	(*slice)->len = len;
	memcpy((*slice)->buffer, buf, len);
}
int process_file(const char *in_filename, const char *out_filename) { avro_file_reader_t reader; avro_file_writer_t writer; if (in_filename == NULL) { if (avro_file_reader_fp(stdin, "<stdin>", 0, &reader)) { fprintf(stderr, "Error opening <stdin>:\n %s\n", avro_strerror()); return 1; } } else { if (avro_file_reader(in_filename, &reader)) { fprintf(stderr, "Error opening %s:\n %s\n", in_filename, avro_strerror()); return 1; } } avro_schema_t wschema; wschema = avro_file_reader_get_writer_schema(reader); /* Check that the reader schema is the same as the writer schema */ { avro_schema_t oschema; avro_file_reader_t oreader; if (avro_file_reader(out_filename, &oreader)) { fprintf(stderr, "Error opening %s:\n %s\n", out_filename, avro_strerror()); avro_file_reader_close(reader); return 1; } oschema = avro_file_reader_get_writer_schema(oreader); if (avro_schema_equal(oschema, wschema) == 0) { fprintf(stderr, "Error: reader and writer schema are not equal.\n"); avro_file_reader_close(oreader); avro_file_reader_close(reader); return 1; } avro_file_reader_close(oreader); avro_schema_decref(oschema); } if (avro_file_writer_open(out_filename, &writer)) { fprintf(stderr, "Error opening %s:\n %s\n", out_filename, avro_strerror()); avro_file_reader_close(reader); return 1; } avro_value_iface_t *iface; avro_value_t value; iface = avro_generic_class_from_schema(wschema); avro_generic_value_new(iface, &value); while (avro_file_reader_read_value(reader, &value) == 0) { if (avro_file_writer_append_value(writer, &value)) { fprintf(stderr, "Error writing to %s:\n %s\n", out_filename, avro_strerror()); return 1; } avro_value_reset(&value); } avro_file_reader_close(reader); avro_file_writer_close(writer); avro_value_decref(&value); avro_value_iface_decref(iface); avro_schema_decref(wschema); return 0; }
static void process_file(const char *filename) { avro_file_reader_t reader; FILE *fp; int should_close; if (filename == NULL) { fp = stdin; filename = "<stdin>"; should_close = 0; } else { fp = fopen(filename, "rb"); should_close = 1; if (fp == NULL) { fprintf(stderr, "Error opening %s:\n %s\n", filename, strerror(errno)); exit(1); } } if (avro_file_reader_fp(fp, filename, 0, &reader)) { fprintf(stderr, "Error opening %s:\n %s\n", filename, avro_strerror()); if (should_close) { fclose(fp); } exit(1); } avro_schema_t wschema; avro_value_iface_t *iface; avro_value_t value; wschema = avro_file_reader_get_writer_schema(reader); iface = avro_generic_class_from_schema(wschema); avro_generic_value_new(iface, &value); int rval; while ((rval = avro_file_reader_read_value(reader, &value)) == 0) { char *json; if (avro_value_to_json(&value, 1, &json)) { fprintf(stderr, "Error converting value to JSON: %s\n", avro_strerror()); } else { printf("%s\n", json); free(json); } avro_value_reset(&value); } // If it was not an EOF that caused it to fail, // print the error. if (rval != EOF) { fprintf(stderr, "Error: %s\n", avro_strerror()); } avro_file_reader_close(reader); avro_value_decref(&value); avro_value_iface_decref(iface); avro_schema_decref(wschema); if (should_close) { fclose(fp); } }
/* function call from lmlite with parameters */
/*
 * network_devices_status_report() - build the Avro "NetworkDevicesStatus"
 * report from the given linked list and send it over WebPA.
 *
 * head       - linked list of per-device status entries to serialize
 * extender   - FALSE: report is for the gateway itself; TRUE: report is for
 *              an extender whose parent is identified by parent_mac
 * parent_mac - colon-separated MAC string ("aa:bb:..."); read on the
 *              extender path. NOTE(review): strcmp(ptr->parent, parent_mac)
 *              below is evaluated even when extender == FALSE — confirm
 *              callers never pass NULL parent_mac in that case.
 *
 * Relies on file-scope state not visible here: iface (Avro value class),
 * macStr/CpemacStr (cached CPE MAC), lmHosts, ReportSource,
 * OneAvroSerializedSize/AvroSerializedSize and AvroSerializedBuf —
 * presumably populated by prepare_writer_status(); verify against that code.
 */
void network_devices_status_report(struct networkdevicestatusdata *head, BOOL extender, char* parent_mac)
{
    int i = 0, k = 0;
    uint8_t* b64buffer = NULL;              /* only used by the disabled b64 debug dump below */
    size_t decodesize = 0;                  /* ditto */
    int numElements = 0;
    struct networkdevicestatusdata* ptr = head;
    avro_writer_t writer;
    char * serviceName = "lmlite";
    char * dest = "event:raw.kestrel.reports.NetworkDevicesStatus";
    char * contentType = "avro/binary"; // contentType "application/json", "avro/binary"
    uuid_t transaction_id;
    char trans_id[37];
    char CpeMacHoldingBuf[ 20 ] = {0};      /* scratch hex-pair buffer for MAC parsing */
    unsigned char CpeMacid[ 7 ] = {0};      /* parsed 6-byte MAC (+NUL slack) */

    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, LMLite %s : ENTER \n", __FUNCTION__ ));

    numElements = NumberofElementsinLinkedList(head);
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, numElements = %d\n", numElements ));
    OneAvroSerializedSize = 0;

    /* goes thru total number of elements in link list */
    writer = prepare_writer_status();

    //Reset out writer
    avro_writer_reset(writer);

    //Network Device Report
    avro_value_t adr;
    avro_generic_value_new(iface, &adr);
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, GatewayNetworkDeviceStatusReport\tType: %d\n", avro_value_get_type(&adr)));

    avro_value_t adrField = {0,0};
    avro_value_t array = {0,0};
    size_t new_index = 0;                   /* NOTE(review): never used */

    //Optional value for unions, mac address is an union
    avro_value_t optional = {0,0};

    // timestamp - long
    avro_value_get_by_name(&adr, "header", &adrField, NULL);
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
    avro_value_get_by_name(&adrField, "timestamp", &adrField, NULL);
    avro_value_set_branch(&adrField, 1, &optional);
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

    /* report timestamp in milliseconds; optionally shifted to UTC */
    struct timeval ts;
    gettimeofday(&ts, NULL);
#ifndef UTC_ENABLE
    int64_t tstamp_av_main = ((int64_t) (ts.tv_sec - getTimeOffsetFromUtc()) * 1000000) + (int64_t) ts.tv_usec;
#else
    int64_t tstamp_av_main = ((int64_t) (ts.tv_sec) * 1000000) + (int64_t) ts.tv_usec;
#endif
    tstamp_av_main = tstamp_av_main/1000;

    avro_value_set_long(&optional, tstamp_av_main );
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, timestamp = ""%" PRId64 "\n", tstamp_av_main ));
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, timestamp\tType: %d\n", avro_value_get_type(&optional)));
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

    // uuid - fixed 16 bytes
    uuid_generate_random(transaction_id);
    uuid_unparse(transaction_id, trans_id);

    avro_value_get_by_name(&adr, "header", &adrField, NULL);
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
    avro_value_get_by_name(&adrField, "uuid", &adrField, NULL);
    avro_value_set_branch(&adrField, 1, &optional);
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
    avro_value_set_fixed(&optional, transaction_id, 16);
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, uuid\tType: %d\n", avro_value_get_type(&optional)));
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

    //source - string
    avro_value_get_by_name(&adr, "header", &adrField, NULL);
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
    avro_value_get_by_name(&adrField, "source", &adrField, NULL);
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
    avro_value_set_branch(&adrField, 1, &optional);
    avro_value_set_string(&optional, ReportSource);
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, source\tType: %d\n", avro_value_get_type(&optional)));
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

    if ( extender == FALSE )
    {
        //cpe_id block
        /* MAC - Get CPE mac address, do it only pointer is NULL */
        memset(CpeMacHoldingBuf, 0, sizeof CpeMacHoldingBuf);
        memset(CpeMacid, 0, sizeof CpeMacid);
        if ( macStr == NULL )
        {
            macStr = getDeviceMac();
            /* NOTE(review): strncpy may leave CpemacStr unterminated if the
             * source fills it completely — confirm buffer sizing */
            strncpy( CpemacStr, macStr, sizeof(CpemacStr));
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, Received DeviceMac from Atom side: %s\n",macStr));
        }
        /* CpemacStr is indexed k*2 here — presumably a 12-hex-digit string
         * without separators; verify against getDeviceMac() */
        for (k = 0; k < 6; k++ )
        {
            /* copy 2 bytes */
            CpeMacHoldingBuf[ k * 2 ] = CpemacStr[ k * 2 ];
            CpeMacHoldingBuf[ k * 2 + 1 ] = CpemacStr[ k * 2 + 1 ];
            CpeMacid[ k ] = (unsigned char)strtol(&CpeMacHoldingBuf[ k * 2 ], NULL, 16);
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, Mac address = %0x\n", CpeMacid[ k ] ));
        }
        avro_value_get_by_name(&adr, "cpe_id", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_get_by_name(&adrField, "mac_address", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_set_branch(&adrField, 1, &optional);
        avro_value_set_fixed(&optional, CpeMacid, 6);
        CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, mac_address\tType: %d\n", avro_value_get_type(&optional)));
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

        // cpe_type - string
        avro_value_get_by_name(&adr, "cpe_id", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_get_by_name(&adrField, "cpe_type", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_set_branch(&adrField, 1, &optional);
        avro_value_set_string(&optional, CPE_TYPE_GATEWAY_STRING);
        CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, cpe_type\tType: %d\n", avro_value_get_type(&optional)));
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

        // cpe_parent - Recurrsive CPEIdentifier block
        /* gateway has no parent: select the null branch of the union */
        avro_value_get_by_name(&adr, "cpe_id", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_get_by_name(&adrField, "cpe_parent", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_set_branch(&adrField, 0, &optional);
        avro_value_set_null(&optional);
        CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, cpe_parent\tType: %d\n", avro_value_get_type(&optional)));
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
    }
    else
    {
        //cpe_id block
        avro_value_t parent_optional = {0,0}, parent_adrField = {0,0};
        memset(CpeMacHoldingBuf, 0, sizeof CpeMacHoldingBuf);
        memset(CpeMacid, 0, sizeof CpeMacid);
        /* parent_mac is indexed k*3 — colon-separated "aa:bb:..." format */
        for (k = 0; k < 6; k++ )
        {
            /* copy 2 bytes */
            CpeMacHoldingBuf[ k * 2 ] = parent_mac[ k * 3 ];
            CpeMacHoldingBuf[ k * 2 + 1 ] = parent_mac[ k * 3 + 1 ];
            CpeMacid[ k ] = (unsigned char)strtol(&CpeMacHoldingBuf[ k * 2 ], NULL, 16);
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, Extender Mac address = %0x\n", CpeMacid[ k ] ));
        }
        avro_value_get_by_name(&adr, "cpe_id", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_get_by_name(&adrField, "mac_address", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_set_branch(&adrField, 1, &optional);
        avro_value_set_fixed(&optional, CpeMacid, 6);
        CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, mac_address\tType: %d\n", avro_value_get_type(&optional)));
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

        // cpe_type - string
        avro_value_get_by_name(&adr, "cpe_id", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_get_by_name(&adrField, "cpe_type", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_set_branch(&adrField, 1, &optional);
        avro_value_set_string(&optional, CPE_TYPE_EXTENDER_STRING);
        CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, cpe_type\tType: %d\n", avro_value_get_type(&optional)));
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

        // cpe_parent - Recurrsive CPEIdentifier block
        avro_value_get_by_name(&adr, "cpe_id", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_get_by_name(&adrField, "cpe_parent", &adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

        /* MAC - Get CPE mac address, do it only pointer is NULL */
        if ( macStr == NULL )
        {
            macStr = getDeviceMac();
            strncpy( CpemacStr, macStr, sizeof(CpemacStr));
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, Received DeviceMac from Atom side: %s\n",macStr));
        }
        memset(CpeMacHoldingBuf, 0, sizeof CpeMacHoldingBuf);
        memset(CpeMacid, 0, sizeof CpeMacid);
        for (k = 0; k < 6; k++ )
        {
            /* copy 2 bytes */
            CpeMacHoldingBuf[ k * 2 ] = CpemacStr[ k * 2 ];
            CpeMacHoldingBuf[ k * 2 + 1 ] = CpemacStr[ k * 2 + 1 ];
            CpeMacid[ k ] = (unsigned char)strtol(&CpeMacHoldingBuf[ k * 2 ], NULL, 16);
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG Parent Mac address = %0x\n", CpeMacid[ k ] ));
        }

        // assume 1 parent ONLY
        // Parent MAC
        avro_value_set_branch(&adrField, 1, &parent_optional);
        avro_value_get_by_name(&parent_optional, "mac_address", &parent_adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_set_branch(&parent_adrField, 1, &parent_optional);
        avro_value_set_fixed(&parent_optional, CpeMacid, 6);
        CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, parent mac_address\tType: %d\n", avro_value_get_type(&parent_optional)));
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

        // Parent cpe_type
        avro_value_set_branch(&adrField, 1, &parent_optional);
        avro_value_get_by_name(&parent_optional, "cpe_type", &parent_adrField, NULL);
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
        avro_value_set_branch(&parent_adrField, 1, &parent_optional);
        avro_value_set_string(&parent_optional, CPE_TYPE_GATEWAY_STRING);
        CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, parent cpe_type\tType: %d\n", avro_value_get_type(&parent_optional)));
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

        // no more parent, set NULL
        avro_value_set_branch(&adrField, 1, &parent_optional);
        avro_value_get_by_name(&parent_optional, "cpe_parent", &parent_adrField, NULL);
        avro_value_set_branch(&parent_adrField, 0, &parent_optional);
        avro_value_set_null(&parent_optional);
        CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, parent cpe_parent\tType: %d\n", avro_value_get_type(&parent_optional)));
        if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
    }

    //host_table_version block
    avro_value_get_by_name(&adr, "host_table_version", &adrField, NULL);
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
    avro_value_set_branch(&adrField, 1, &optional);
    avro_value_set_long(&optional, lmHosts.lastActivity);
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, host_table_version\tType: %d\n", avro_value_get_type(&optional)));
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

    //Data Field block
    avro_value_get_by_name(&adr, "data", &adrField, NULL);
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, NetworkDeviceStatusReports - data array\tType: %d\n", avro_value_get_type(&adrField)));
    if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

    //adrField now contains a reference to the AssociatedDeviceReportsArray
    //Device Report
    avro_value_t dr = {0,0};
    //Current Device Report Field
    avro_value_t drField = {0,0};

    /* one DeviceReport array entry per list node; stops early if the Avro
     * buffer is close to full (see size check at the bottom of the loop) */
    while(ptr)
    {
        /* NOTE(review): strcmp runs before the extender==FALSE short-circuit
         * can skip it — parent_mac must be non-NULL even for gateway reports */
        if( (!strcmp(ptr->parent, parent_mac) && (extender == TRUE)) || (extender == FALSE) )
        {
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, Current Link List Ptr = [0x%lx], numElements = %d\n", (ulong)ptr, numElements ));
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, \tDevice entry #: %d\n", i + 1));

            //Append a DeviceReport item to array
            avro_value_append(&adrField, &dr, NULL);
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, \tDevice Status Report\tType: %d\n", avro_value_get_type(&dr)));

            //data array block
            memset(CpeMacHoldingBuf, 0, sizeof CpeMacHoldingBuf);
            memset(CpeMacid, 0, sizeof CpeMacid);
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, Mac address from node list = %s \n", ptr->device_mac ));
            for (k = 0; k < 6; k++ )
            {
                /* copy 2 bytes */
                CpeMacHoldingBuf[ k * 2 ] = ptr->device_mac[ k * 3 ];
                CpeMacHoldingBuf[ k * 2 + 1 ] = ptr->device_mac[ k * 3 + 1 ];
                CpeMacid[ k ] = (unsigned char)strtol(&CpeMacHoldingBuf[ k * 2 ], NULL, 16);
                CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, Mac address = %0x\n", CpeMacid[ k ] ));
            }

            //device_mac - fixed 6 bytes
            avro_value_get_by_name(&dr, "device_id", &drField, NULL);
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, device_id\tType: %d\n", avro_value_get_type(&drField)));
            avro_value_get_by_name(&drField, "mac_address", &drField, NULL);
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
            avro_value_set_branch(&drField, 1, &optional);
            avro_value_set_fixed(&optional, CpeMacid, 6);
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, \tmac_address\tType: %d\n", avro_value_get_type(&optional)));
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

            //device_type - string
            avro_value_get_by_name(&dr, "device_id", &drField, NULL);
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, device_id\tType: %d\n", avro_value_get_type(&drField)));
            avro_value_get_by_name(&drField, "device_type", &drField, NULL);
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
            avro_value_set_branch(&drField, 1, &optional);
            avro_value_set_string(&optional, ptr->device_type);
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, \tdevice_type\tType: %d\n", avro_value_get_type(&optional)));
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

            //timestamp - long
            avro_value_get_by_name(&dr, "timestamp", &drField, NULL);
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
            avro_value_set_branch(&drField, 1, &optional);
            /* per-device timestamp, microseconds scaled down to milliseconds */
            int64_t tstamp_av = (int64_t) ptr->timestamp.tv_sec * 1000000 + (int64_t) ptr->timestamp.tv_usec;
            tstamp_av = tstamp_av/1000;
            avro_value_set_long(&optional, tstamp_av);
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, timestamp = ""%" PRId64 "\n", tstamp_av ));
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, \ttimestamp\tType: %d\n", avro_value_get_type(&optional)));
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

            //interface_name - string
            avro_value_get_by_name(&dr, "interface_name", &drField, NULL);
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
            avro_value_set_branch(&drField, 1, &optional);
            //avro_value_set_string(&optional, " aa ");
            avro_value_set_string(&optional, ptr->interface_name );
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, \tinterface_name\tType: %d\n", avro_value_get_type(&optional)));
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

            //status - enum
            avro_value_get_by_name(&dr, "status", &drField, NULL);
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
            avro_value_set_branch(&drField, 1, &optional);
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, status\tType: %d\n", avro_value_get_type(&optional)));
            if ( ptr->is_active )
                avro_value_set_enum(&optional, avro_schema_enum_get_by_name(avro_value_get_schema(&optional), "ONLINE"));
            else
                avro_value_set_enum(&optional, avro_schema_enum_get_by_name(avro_value_get_schema(&optional), "OFFLINE"));
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

            //hostname - string
            avro_value_get_by_name(&dr, "hostname", &drField, NULL);
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
            avro_value_set_branch(&drField, 1, &optional);
            avro_value_set_string(&optional, ptr->hostname);
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, \thostname\tType: %d\n", avro_value_get_type(&optional)));
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

            //ipaddress - array
            avro_value_get_by_name(&dr, "ip_addresses", &drField, NULL);
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));
            avro_value_set_branch(&drField, 1, &optional);
            avro_value_append(&optional, &array, NULL);
            avro_value_set_string(&array, ptr->ipaddress);
            CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, \tipaddress\tType: %d\n", avro_value_get_type(&optional)));
            if ( CHK_AVRO_ERR ) CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, %s\n", avro_strerror()));

            i++;
        }
#if SIMULATION
        ptr = 0;
#else
        ptr = ptr->next; // next link list
#endif
        /* check for writer size, if buffer is almost full, skip trailing linklist */
        avro_value_sizeof(&adr, &AvroSerializedSize);
        OneAvroSerializedSize = ( OneAvroSerializedSize == 0 ) ? AvroSerializedSize : OneAvroSerializedSize;
        if ( ( WRITER_BUF_SIZE - AvroSerializedSize ) < OneAvroSerializedSize )
        {
            CcspLMLiteTrace(("RDK_LOG_ERROR, AVRO write buffer is almost full, size = %d func %s, exit!\n", (int)AvroSerializedSize, __FUNCTION__ ));
            break;
        }
    }

    //Thats the end of that
    avro_value_write(writer, &adr);

    avro_value_sizeof(&adr, &AvroSerializedSize);
    AvroSerializedSize += MAGIC_NUMBER_SIZE + SCHEMA_ID_LENGTH;
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, Serialized writer size %d\n", (int)AvroSerializedSize));

    //Free up memory
    avro_value_decref(&adr);
    avro_writer_free(writer);
    //free(buffer);

    /* if ( consoleDebugEnable )
    {
        // b64 encoding
        decodesize = b64_get_encoded_buffer_size( AvroSerializedSize );
        b64buffer = malloc(decodesize * sizeof(uint8_t));
        b64_encode( (uint8_t*)AvroSerializedBuf, AvroSerializedSize, b64buffer);
        fprintf( stderr, "\nAVro serialized data\n");
        for (k = 0; k < (int)AvroSerializedSize ; k++)
        {
            char buf[30];
            if ( ( k % 32 ) == 0 ) fprintf( stderr, "\n");
            sprintf(buf, "%02X", (unsigned char)AvroSerializedBuf[k]);
            fprintf( stderr, "%c%c", buf[0], buf[1] );
        }
        fprintf( stderr, "\n\nB64 data\n");
        for (k = 0; k < (int)decodesize; k++)
        {
            if ( ( k % 32 ) == 0 ) fprintf( stderr, "\n");
            fprintf( stderr, "%c", b64buffer[k]);
        }
        fprintf( stderr, "\n\n");
        free(b64buffer);
    }*/

    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, Before ND WebPA SEND message call\n"));
#ifdef PARODUS_ENABLE
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, serviceName: %s\n", serviceName));
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, dest: %s\n", dest));
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, trans_id: %s\n", trans_id));
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, contentType: %s\n", contentType));
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, AvroSerializedBuf: %s\n", AvroSerializedBuf));
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, AvroSerializedSize: %d\n", (int)AvroSerializedSize));
#endif
    // Send data from LMLite to webpa using CCSP bus interface
    sendWebpaMsg(serviceName, dest, trans_id, contentType, AvroSerializedBuf, AvroSerializedSize);
    CcspTraceWarning(("NetworkDevicesStatus report sent to Webpa, Destination=%s, Transaction-Id=%s \n",dest,trans_id));
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, After ND WebPA SEND message call\n"));
    CcspLMLiteConsoleTrace(("RDK_LOG_DEBUG, LMLite %s : EXIT \n", __FUNCTION__ ));
#if SIMULATION
    exit(0);
#endif
}
void kafka_cache_purge(struct chained_cache *queue[], int index, int safe_action) { struct pkt_primitives *data = NULL; struct pkt_bgp_primitives *pbgp = NULL; struct pkt_nat_primitives *pnat = NULL; struct pkt_mpls_primitives *pmpls = NULL; struct pkt_tunnel_primitives *ptun = NULL; char *pcust = NULL; struct pkt_vlen_hdr_primitives *pvlen = NULL; struct pkt_bgp_primitives empty_pbgp; struct pkt_nat_primitives empty_pnat; struct pkt_mpls_primitives empty_pmpls; struct pkt_tunnel_primitives empty_ptun; char *empty_pcust = NULL; char src_mac[18], dst_mac[18], src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN], ip_address[INET6_ADDRSTRLEN]; char rd_str[SRVBUFLEN], misc_str[SRVBUFLEN], dyn_kafka_topic[SRVBUFLEN], *orig_kafka_topic = NULL; int i, j, stop, batch_idx, is_topic_dyn = FALSE, qn = 0, ret, saved_index = index; int mv_num = 0, mv_num_save = 0; time_t start, duration; pid_t writer_pid = getpid(); char *json_buf = NULL; int json_buf_off = 0; #ifdef WITH_AVRO avro_writer_t avro_writer; char *avro_buf = NULL; int avro_buffer_full = FALSE; #endif p_kafka_init_host(&kafkap_kafka_host, config.kafka_config_file); /* setting some defaults */ if (!config.sql_host) config.sql_host = default_kafka_broker_host; if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port; if (!config.sql_table) config.sql_table = default_kafka_topic; else { if (strchr(config.sql_table, '$')) { is_topic_dyn = TRUE; orig_kafka_topic = config.sql_table; } } if (config.amqp_routing_key_rr) orig_kafka_topic = config.sql_table; p_kafka_init_topic_rr(&kafkap_kafka_host); p_kafka_set_topic_rr(&kafkap_kafka_host, config.amqp_routing_key_rr); empty_pcust = malloc(config.cpptrs.len); if (!empty_pcust) { Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() empty_pcust. 
Exiting.\n", config.name, config.type); exit_plugin(1); } memset(&empty_pbgp, 0, sizeof(struct pkt_bgp_primitives)); memset(&empty_pnat, 0, sizeof(struct pkt_nat_primitives)); memset(&empty_pmpls, 0, sizeof(struct pkt_mpls_primitives)); memset(&empty_ptun, 0, sizeof(struct pkt_tunnel_primitives)); memset(empty_pcust, 0, config.cpptrs.len); p_kafka_connect_to_produce(&kafkap_kafka_host); p_kafka_set_broker(&kafkap_kafka_host, config.sql_host, config.kafka_broker_port); if (!is_topic_dyn && !config.amqp_routing_key_rr) p_kafka_set_topic(&kafkap_kafka_host, config.sql_table); p_kafka_set_partition(&kafkap_kafka_host, config.kafka_partition); p_kafka_set_key(&kafkap_kafka_host, config.kafka_partition_key, config.kafka_partition_keylen); if (config.message_broker_output & PRINT_OUTPUT_JSON) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_STR); else if (config.message_broker_output & PRINT_OUTPUT_AVRO) p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_BIN); else { Log(LOG_ERR, "ERROR ( %s/%s ): Unsupported kafka_output value specified. 
Exiting.\n", config.name, config.type); exit_plugin(1); } for (j = 0, stop = 0; (!stop) && P_preprocess_funcs[j]; j++) stop = P_preprocess_funcs[j](queue, &index, j); Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid); start = time(NULL); if (config.print_markers) { if (config.message_broker_output & PRINT_OUTPUT_JSON || config.message_broker_output & PRINT_OUTPUT_AVRO) { void *json_obj; char *json_str; json_obj = compose_purge_init_json(config.name, writer_pid); if (json_obj) json_str = compose_json_str(json_obj); if (json_str) { Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str); ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str)); free(json_str); json_str = NULL; } } } if (config.message_broker_output & PRINT_OUTPUT_JSON) { if (config.sql_multi_values) { json_buf = malloc(config.sql_multi_values); if (!json_buf) { Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (json_buf). Exiting ..\n", config.name, config.type); exit_plugin(1); } else memset(json_buf, 0, config.sql_multi_values); } } else if (config.message_broker_output & PRINT_OUTPUT_AVRO) { #ifdef WITH_AVRO if (!config.avro_buffer_size) config.avro_buffer_size = LARGEBUFLEN; avro_buf = malloc(config.avro_buffer_size); if (!avro_buf) { Log(LOG_ERR, "ERROR ( %s/%s ): malloc() failed (avro_buf). 
Exiting ..\n", config.name, config.type); exit_plugin(1); } else memset(avro_buf, 0, config.avro_buffer_size); avro_writer = avro_writer_memory(avro_buf, config.avro_buffer_size); #endif } for (j = 0; j < index; j++) { void *json_obj; char *json_str; if (queue[j]->valid != PRINT_CACHE_COMMITTED) continue; data = &queue[j]->primitives; if (queue[j]->pbgp) pbgp = queue[j]->pbgp; else pbgp = &empty_pbgp; if (queue[j]->pnat) pnat = queue[j]->pnat; else pnat = &empty_pnat; if (queue[j]->pmpls) pmpls = queue[j]->pmpls; else pmpls = &empty_pmpls; if (queue[j]->ptun) ptun = queue[j]->ptun; else ptun = &empty_ptun; if (queue[j]->pcust) pcust = queue[j]->pcust; else pcust = empty_pcust; if (queue[j]->pvlen) pvlen = queue[j]->pvlen; else pvlen = NULL; if (queue[j]->valid == PRINT_CACHE_FREE) continue; if (config.message_broker_output & PRINT_OUTPUT_JSON) { #ifdef WITH_JANSSON json_t *json_obj = json_object(); int idx; for (idx = 0; idx < N_PRIMITIVES && cjhandler[idx]; idx++) cjhandler[idx](json_obj, queue[j]); add_writer_name_and_pid_json(json_obj, config.name, writer_pid); json_str = compose_json_str(json_obj); #endif } else if (config.message_broker_output & PRINT_OUTPUT_AVRO) { #ifdef WITH_AVRO avro_value_iface_t *avro_iface = avro_generic_class_from_schema(avro_acct_schema); avro_value_t avro_value = compose_avro(config.what_to_count, config.what_to_count_2, queue[j]->flow_type, &queue[j]->primitives, pbgp, pnat, pmpls, ptun, pcust, pvlen, queue[j]->bytes_counter, queue[j]->packet_counter, queue[j]->flow_counter, queue[j]->tcp_flags, &queue[j]->basetime, queue[j]->stitch, avro_iface); size_t avro_value_size; add_writer_name_and_pid_avro(avro_value, config.name, writer_pid); avro_value_sizeof(&avro_value, &avro_value_size); if (avro_value_size > config.avro_buffer_size) { Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: insufficient buffer size (avro_buffer_size=%u)\n", config.name, config.type, config.avro_buffer_size); Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: increase value or look for 
avro_buffer_size in CONFIG-KEYS document.\n\n", config.name, config.type); exit_plugin(1); } else if (avro_value_size >= (config.avro_buffer_size - avro_writer_tell(avro_writer))) { avro_buffer_full = TRUE; j--; } else if (avro_value_write(avro_writer, &avro_value)) { Log(LOG_ERR, "ERROR ( %s/%s ): AVRO: unable to write value: %s\n", config.name, config.type, avro_strerror()); exit_plugin(1); } else { mv_num++; } avro_value_decref(&avro_value); avro_value_iface_decref(avro_iface); #else if (config.debug) Log(LOG_DEBUG, "DEBUG ( %s/%s ): compose_avro(): AVRO object not created due to missing --enable-avro\n", config.name, config.type); #endif } if (config.message_broker_output & PRINT_OUTPUT_JSON) { char *tmp_str = NULL; if (json_str && config.sql_multi_values) { int json_strlen = (strlen(json_str) ? (strlen(json_str) + 1) : 0); if (json_strlen >= (config.sql_multi_values - json_buf_off)) { if (json_strlen >= config.sql_multi_values) { Log(LOG_ERR, "ERROR ( %s/%s ): kafka_multi_values not large enough to store JSON elements. 
Exiting ..\n", config.name, config.type); exit(1); } tmp_str = json_str; json_str = json_buf; } else { strcat(json_buf, json_str); mv_num++; string_add_newline(json_buf); json_buf_off = strlen(json_buf); free(json_str); json_str = NULL; } } if (json_str) { if (is_topic_dyn) { P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]); p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic); } if (config.amqp_routing_key_rr) { P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr); p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic); } Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str); ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str)); if (config.sql_multi_values) { json_str = tmp_str; strcpy(json_buf, json_str); mv_num_save = mv_num; mv_num = 1; string_add_newline(json_buf); json_buf_off = strlen(json_buf); } free(json_str); json_str = NULL; if (!ret) { if (!config.sql_multi_values) qn++; else qn += mv_num_save; } else break; } } else if (config.message_broker_output & PRINT_OUTPUT_AVRO) { #ifdef WITH_AVRO if (!config.sql_multi_values || (mv_num >= config.sql_multi_values) || avro_buffer_full) { if (is_topic_dyn) { P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]); p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic); } if (config.amqp_routing_key_rr) { P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr); p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic); } ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer)); avro_writer_reset(avro_writer); avro_buffer_full = FALSE; mv_num_save = mv_num; mv_num = 0; if (!ret) qn += mv_num_save; else break; } #endif } } if (config.sql_multi_values) { if (config.message_broker_output & PRINT_OUTPUT_JSON) { if (json_buf && json_buf_off) { /* no handling of dyn routing keys here: not compatible */ 
Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_buf); ret = p_kafka_produce_data(&kafkap_kafka_host, json_buf, strlen(json_buf)); if (!ret) qn += mv_num; } } else if (config.message_broker_output & PRINT_OUTPUT_AVRO) { #ifdef WITH_AVRO if (avro_writer_tell(avro_writer)) { ret = p_kafka_produce_data(&kafkap_kafka_host, avro_buf, avro_writer_tell(avro_writer)); avro_writer_free(avro_writer); if (!ret) qn += mv_num; } #endif } } duration = time(NULL)-start; if (config.print_markers) { if (config.message_broker_output & PRINT_OUTPUT_JSON || config.message_broker_output & PRINT_OUTPUT_AVRO) { void *json_obj; char *json_str; json_obj = compose_purge_close_json(config.name, writer_pid, qn, saved_index, duration); if (json_obj) json_str = compose_json_str(json_obj); if (json_str) { sleep(1); /* Let's give a small delay to facilitate purge_close being the last message in batch in case of partitioned topics */ Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str); ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str)); free(json_str); json_str = NULL; } } } p_kafka_close(&kafkap_kafka_host, FALSE); Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n", config.name, config.type, writer_pid, qn, saved_index, duration); if (config.sql_trigger_exec && !safe_action) P_trigger_exec(config.sql_trigger_exec); if (empty_pcust) free(empty_pcust); if (json_buf) free(json_buf); #ifdef WITH_AVRO if (avro_buf) free(avro_buf); #endif }
int main(int argc, char *argv[]) { FILE *input; avro_schema_t schema; avro_file_writer_t out; const char *key; int opt, opterr = 0, verbose = 0, memstat = 0, errabort = 0, strjson = 0; char *schema_arg = NULL; char *codec = NULL; char *endptr = NULL; char *outpath = NULL; size_t block_sz = 0; size_t max_str_sz = 0; extern char *optarg; extern int optind, optopt; while ((opt = getopt(argc, argv, "c:s:S:b:z:dmxjh")) != -1) { switch (opt) { case 's': schema_arg = optarg; break; case 'S': schema_arg = read_schema_file(optarg); break; case 'b': block_sz = strtol(optarg, &endptr, 0); if (*endptr) { fprintf(stderr, "ERROR: Invalid block size for -b: %s\n", optarg); opterr++; } break; case 'z': max_str_sz = strtol(optarg, &endptr, 0); if (*endptr) { fprintf(stderr, "ERROR: Invalid maximum string size for -z: %s\n", optarg); opterr++; } break; case 'c': codec = optarg; break; case 'd': verbose = 1; break; case 'x': errabort = 1; break; case 'j': strjson = 1; break; case 'm': #if defined(__linux__) memstat = 1; #else usage_error(argv[0], "Memory stats is a Linux-only feature!"); #endif break; case 'h': print_help(argv[0]); exit(0); case ':': fprintf(stderr, "ERROR: Option -%c requires an operand\n", optopt); opterr++; break; case '?': fprintf(stderr, "ERROR: Unrecognized option: -%c\n", optopt); opterr++; } } int file_args_cnt = (argc - optind); if (file_args_cnt == 0) { usage_error(argv[0], "Please provide at least one file name argument"); } if (file_args_cnt > 2) { fprintf(stderr, "Too many file name arguments: %d!\n", file_args_cnt); usage_error(argv[0], 0); } if (opterr) usage_error(argv[0], 0); if (!schema_arg) usage_error(argv[0], "Please provide correct schema!"); if (!codec) codec = "null"; else if (strcmp(codec, "snappy") && strcmp(codec, "deflate") && strcmp(codec, "lzma") && strcmp(codec, "null")) { fprintf(stderr, "ERROR: Invalid codec %s, valid codecs: snappy, deflate, lzma, null\n", codec); exit(EXIT_FAILURE); } if ((argc - optind) == 1) { input = stdin; 
outpath = argv[optind]; } else { outpath = argv[optind+1]; input = fopen(argv[optind], "rb"); if ( errno != 0 ) { fprintf(stderr, "ERROR: Cannot open input file: %s: ", argv[optind]); perror(0); exit(EXIT_FAILURE); } } if (avro_schema_from_json_length(schema_arg, strlen(schema_arg), &schema)) { fprintf(stderr, "ERROR: Unable to parse schema: '%s'\n", schema_arg); exit(EXIT_FAILURE); } if (!strcmp(outpath, "-")) { if (avro_file_writer_create_with_codec_fp(stdout, outpath, 0, schema, &out, codec, block_sz)) { fprintf(stderr, "ERROR: avro_file_writer_create_with_codec_fp FAILED: %s\n", avro_strerror()); exit(EXIT_FAILURE); } } else { remove(outpath); if (avro_file_writer_create_with_codec(outpath, schema, &out, codec, block_sz)) { fprintf(stderr, "ERROR: avro_file_writer_create_with_codec FAILED: %s\n", avro_strerror()); exit(EXIT_FAILURE); } } if (verbose) fprintf(stderr, "Using codec: %s\n", codec); process_file(input, out, schema, verbose, memstat, errabort, strjson, max_str_sz); if (verbose) printf("Closing writer....\n"); avro_file_writer_close(out); }
/*
 * Read a stream of concatenated/newline-delimited JSON values from 'input',
 * convert each through 'schema' (via schema_traverse) and append it to the
 * Avro container writer 'out'.
 *
 * Malformed JSON is reported and skipped to end-of-line, unless 'errabort'
 * is set, in which case the function returns after the first error.
 * 'verbose' prints progress every 1000 records; 'memstat' dumps memory
 * statistics every 1000 records and at the end; 'strjson' / 'max_str_sz'
 * are passed through to schema_traverse.
 *
 * Ownership: takes ownership of 'schema' and decrefs it on every return
 * path. 'input' and 'out' remain owned by the caller.
 */
void process_file(FILE *input, avro_file_writer_t out, avro_schema_t schema,
		  int verbose, int memstat, int errabort, int strjson,
		  size_t max_str_sz)
{
	json_error_t err;
	json_t *json;
	int n = 0;

	/* IMPROVEMENT: the generic value class depends only on the schema,
	   so build it once instead of once per record, and check for failure
	   (the original ignored a NULL result). */
	avro_value_iface_t *iface = avro_generic_class_from_schema(schema);
	if (!iface) {
		fprintf(stderr, "ERROR: avro_generic_class_from_schema() FAILED: %s\n",
			avro_strerror());
		avro_schema_decref(schema);
		return;
	}

	json = json_loadf(input, JSON_DISABLE_EOF_CHECK, &err);

	while (!feof(input)) {
		n++;
		if (verbose && !(n % 1000))
			printf("Processing record %d\n", n);

		if (!json) {
			if (errabort) {
				fprintf(stderr, "JSON error on line %d, column %d, pos %d: %s, aborting.\n",
					n, err.column, err.position, err.text);
				/* BUGFIX: release the iface and schema on the
				   abort path too — the original leaked both. */
				avro_value_iface_decref(iface);
				avro_schema_decref(schema);
				return;
			}
			fprintf(stderr, "JSON error on line %d, column %d, pos %d: %s, skipping to EOL\n",
				n, err.column, err.position, err.text);
			/* Resynchronize: discard the rest of the bad line. */
			while (getc(input) != '\n' && !feof(input)) {}
			json = json_loadf(input, JSON_DISABLE_EOF_CHECK, &err);
			continue;
		}

		avro_value_t record;
		/* BUGFIX: check the allocation instead of using an
		   uninitialized value on failure. */
		if (avro_generic_value_new(iface, &record)) {
			fprintf(stderr, "ERROR: avro_generic_value_new() FAILED: %s\n",
				avro_strerror());
			exit(EXIT_FAILURE);
		}

		/* schema_traverse() returns 0 on success. */
		if (!schema_traverse(schema, json, NULL, &record, 0, strjson, max_str_sz)) {
			if (avro_file_writer_append_value(out, &record)) {
				fprintf(stderr, "ERROR: avro_file_writer_append_value() FAILED: %s\n",
					avro_strerror());
				exit(EXIT_FAILURE);
			}
		} else
			fprintf(stderr, "Error processing record %d, skipping...\n", n);

		avro_value_decref(&record);
		json_decref(json);

		if (memstat && !(n % 1000))
			memory_status();

		json = json_loadf(input, JSON_DISABLE_EOF_CHECK, &err);
	}

	if (memstat)
		memory_status();

	avro_value_iface_decref(iface);
	avro_schema_decref(schema);
}
/* Raise the most recent libavro error as a Lua error.
 *
 * Pushes avro_strerror()'s message onto the Lua stack and throws it via
 * lua_error(), which does not return; the int return type only satisfies
 * the lua_CFunction signature. */
static int lua_avro_error(lua_State *L)
{
	const char *msg = avro_strerror();
	lua_pushstring(L, msg);
	return lua_error(L);
}
/* Reset the reusable Avro frame value held in the plugin state so it can
 * be repopulated for the next frame.  On failure, reports the libavro
 * error through elog(ERROR). */
void reset_frame(plugin_state *state)
{
	int rc = avro_value_reset(&state->frame_value);
	if (rc != 0) {
		elog(ERROR, "Avro value reset failed: %s", avro_strerror());
	}
}