/**
 * Extract the variable names an operation monitor depends on.
 *
 * @param monitor   Monitor to inspect; only RB_MONITOR_T__OP has deps.
 * @param vars      (out) strdup'd array of variable names; caller frees
 *                  each element and the array itself.
 * @param vars_size (out) number of elements in *vars.
 *
 * On any failure (or for non-op monitors) *vars == NULL and *vars_size == 0.
 */
void rb_monitor_get_op_variables(const rb_monitor_t *monitor,
				 char ***vars,
				 size_t *vars_size) {
	void *evaluator = NULL;
	struct {
		char **vars;
		int count;
	} all_vars;

	(*vars) = NULL;
	*vars_size = 0;

	if (monitor->type != RB_MONITOR_T__OP) {
		goto no_deps;
	}

	evaluator = evaluator_create((char *)monitor->cmd_arg);
	if (NULL == evaluator) {
		rdlog(LOG_ERR,
		      "Couldn't create an evaluator from %s",
		      monitor->cmd_arg);
		goto no_deps;
	}

	evaluator_get_variables(evaluator, &all_vars.vars, &all_vars.count);

	// Fix: malloc(0) may legally return NULL, which the previous code
	// misreported as an OOM error. An operation with no variables is a
	// valid (empty) result.
	if (all_vars.count <= 0) {
		evaluator_destroy(evaluator);
		return;
	}

	(*vars) = malloc((size_t)all_vars.count * sizeof((*vars)[0]));
	if (*vars == NULL) {
		rdlog(LOG_CRIT,
		      "Couldn't allocate memory for %d vars",
		      all_vars.count);
		goto no_deps;
	}

	for (int i = 0; i < all_vars.count; ++i) {
		(*vars)[i] = strdup(all_vars.vars[i]);
		if (NULL == (*vars)[i]) {
			rdlog(LOG_ERR,
			      "Couldn't strdup %s (OOM?)",
			      all_vars.vars[i]);
			// Roll back the copies made so far
			for (int j = 0; j < i; ++j) {
				free((*vars)[j]);
				(*vars)[j] = NULL;
			}
			goto no_deps;
		}
	}

	*vars_size = (size_t)all_vars.count;
	evaluator_destroy(evaluator);
	return;

no_deps:
	if (*vars) {
		free(*vars);
	}
	*vars = NULL;
	*vars_size = 0;
	if (evaluator) {
		evaluator_destroy(evaluator);
	}
}
/// @TODO we should print all with this function static void print_monitor_value_enrichment(struct printbuf *buf, const json_object *const_enrichment) { json_object *enrichment = (json_object *)const_enrichment; for (struct json_object_iterator i = json_object_iter_begin(enrichment), end = json_object_iter_end(enrichment); !json_object_iter_equal(&i, &end); json_object_iter_next(&i)) { const char *key = json_object_iter_peek_name(&i); json_object *val = json_object_iter_peek_value(&i); const json_type type = json_object_get_type(val); switch (type) { case json_type_string: print_monitor_value_enrichment_str(buf, key, val); break; case json_type_int: print_monitor_value_enrichment_int(buf, key, val); break; case json_type_null: sprintbuf(buf, ",\"%s\":null", key); break; case json_type_boolean: { const json_bool b = json_object_get_boolean(val); sprintbuf(buf, ",\"%s\":%s", key, b == FALSE ? "false" : "true"); break; } case json_type_double: { const double d = json_object_get_double(val); sprintbuf(buf, ",\"%s\":%lf", key, d); break; } case json_type_object: case json_type_array: { rdlog(LOG_ERR, "Can't enrich with objects/array at this time"); break; } default: rdlog(LOG_ERR, "Don't know how to duplicate JSON type " "%d", type); break; }; } }
/**
 * Write data to a socket, waiting up to WRITE_SELECT_TIMEVAL for it to
 * become writable.
 *
 * @param fd   Destination socket
 * @param data Bytes to send
 * @param len  Number of bytes
 * @return write() result (>0) on success, 0 on timeout, <0 on select error.
 */
static int send_to_socket(int fd, const char *data, size_t len) {
	struct timeval tv = WRITE_SELECT_TIMEVAL;
	const int select_result = write_select_socket(fd, &tv);

	if (select_result > 0) {
		return write(fd, data, len);
	} else if (select_result == 0) {
		// Fix: "%6ld" space-pads tv_usec, so e.g. 5000us printed as
		// "1.  5000" instead of "1.005000"; "%06ld" zero-pads the
		// fractional part correctly.
		rdlog(LOG_ERR,
		      "Socket not ready for writing in %ld.%06ld. Closing.",
		      WRITE_SELECT_TIMEVAL.tv_sec,
		      WRITE_SELECT_TIMEVAL.tv_usec);
		return select_result;
	} else {
		rdlog(LOG_ERR,
		      "Error writing to socket: %s",
		      mystrerror(errno, errbuf, ERROR_BUFFER_SIZE));
		return select_result;
	}
}
/**
 * Build an array-typed monitor value that owns its children.
 *
 * Ownership of children[], every child, and split_op transfers to the new
 * value; on allocation failure all of them are released here and NULL is
 * returned.
 */
struct monitor_value *new_monitor_value_array(size_t n_children,
					      struct monitor_value **children,
					      struct monitor_value *split_op) {
	struct monitor_value *ret = calloc(1, sizeof(*ret));

	if (NULL == ret) {
		// Allocation failed: we own the arguments, so free them
		// before bailing out.
		if (split_op) {
			rb_monitor_value_done(split_op);
		}
		for (size_t child = 0; child < n_children; ++child) {
			if (children[child]) {
				rb_monitor_value_done(children[child]);
			}
		}
		free(children);
		rdlog(LOG_ERR, "Couldn't allocate monitor value");
		return NULL;
	}

#ifdef MONITOR_VALUE_MAGIC
	ret->magic = MONITOR_VALUE_MAGIC;
#endif

	ret->type = MONITOR_VALUE_T__ARRAY;
	ret->array.children_count = n_children;
	ret->array.split_op_result = split_op;
	ret->array.children = children;

	return ret;
}
/**
 Update database entry with new information (if needed)
 @param entry Entry to be updated
 @param new_config New config
 @return 0 if success, !0 in other case (reason printed)
 */
static int update_organization(organization_db_entry_t *entry,
			       json_t *new_config) {
	int rc = 0;
	json_error_t jerr;
	json_int_t bytes_limit = 0;
	json_t *aux_enrichment = NULL;

	// JSON_STRICT rejects unknown keys. "O" takes a new reference on the
	// enrichment object, so this function owns aux_enrichment until it is
	// swapped into the entry / decref'd below.
	const int unpack_rc = json_unpack_ex(new_config,
					     &jerr,
					     JSON_STRICT,
					     "{s?O,s?{s?I}}",
					     "enrichment",
					     &aux_enrichment,
					     "limits",
					     "bytes",
					     &bytes_limit);

	if (0 != unpack_rc) {
		const char *organization_uuid =
				organization_db_entry_get_uuid(entry);
		rdlog(LOG_ERR,
		      "Couldn't unpack organization %s limits: %s",
		      organization_uuid,
		      jerr.text);
		rc = -1;
		goto unpack_err;
	}

	// Publish the new config under the entry lock.
	pthread_mutex_lock(&entry->mutex);
	// NOTE(review): swap_ptrs presumably exchanges both pointers, so after
	// this line aux_enrichment holds the entry's OLD enrichment, which is
	// released below -- confirm against the swap_ptrs definition.
	swap_ptrs(entry->enrichment, aux_enrichment);
	entry->bytes_limit.max = (uint64_t)bytes_limit;
	pthread_mutex_unlock(&entry->mutex);

unpack_err:
	// Releases either the unpacked reference (error path) or the
	// previous enrichment (success path).
	if (aux_enrichment) {
		json_decref(aux_enrichment);
	}
	return rc;
}
/**
 * Route a single rdkafka key/value pair to the topic or global conf.
 *
 * @param rk_conf  Global rdkafka configuration
 * @param rkt_conf Topic rdkafka configuration
 * @param key      Full config key (CONFIG_RDKAFKA_KEY prefix included)
 * @param value    Config value
 */
static void parse_rdkafka_keyval_config(rd_kafka_conf_t *rk_conf,
					rd_kafka_topic_conf_t *rkt_conf,
					const char *key,
					const char *value) {
	// Extracted from Magnus Edenhill's kafkacat
	char errstr[512];
	const char *name = key + strlen(CONFIG_RDKAFKA_KEY);
	rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN;

	/* Properties with a "topic." prefix are tried on the topic conf
	 * first; anything unmatched falls through to the global conf. */
	if (0 == strncmp(name, "topic.", strlen("topic."))) {
		res = rd_kafka_topic_conf_set(rkt_conf,
					      name + strlen("topic."),
					      value,
					      errstr,
					      sizeof(errstr));
	}

	if (RD_KAFKA_CONF_UNKNOWN == res) {
		res = rd_kafka_conf_set(
				rk_conf, name, value, errstr, sizeof(errstr));
	}

	if (RD_KAFKA_CONF_OK != res) {
		rdlog(LOG_ERR, "rdkafka: %s", errstr);
	}
}
/** Try to send all configured warnings: log to console and send a PUT.
    @param org Organization to warn about */
static void produce_organization_warning(const organization_db_entry_t *org) {
	rdlog(LOG_INFO,
	      "Organization %s has reached it's bytes quota",
	      organization_db_entry_get_uuid(org));
	// Notify the registered limit callback, if any; the ctx pointer is
	// opaque data supplied by whoever registered the callback.
	if (org->db->limit_reached_cb) {
		org->db->limit_reached_cb(org->db,
					  org,
					  org->db->limit_reached_cb_ctx);
	}
}
/**
 * Append a string-typed enrichment key to the JSON output buffer.
 *
 * Logs an error (and appends nothing) when the value cannot be read
 * as a string.
 */
static void print_monitor_value_enrichment_str(struct printbuf *buf,
					       const char *key,
					       json_object *val) {
	const char *text = json_object_get_string(val);

	if (text) {
		sprintbuf(buf, ",\"%s\":\"%s\"", key, text);
	} else {
		rdlog(LOG_ERR,
		      "Cannot extract string value of enrichment key %s",
		      key);
	}
}
/**
 Creates a socket bound to listen_port for the given protocol.
 @param proto Protocol: N2KAFKA_UDP or N2KAFKA_TCP
 @param listen_port Port to bind (must be non-zero)
 @return Socket descriptor on success.
         NOTE(review): failure paths are inconsistent -- some return 0 and
         others return -1; callers must treat both as errors. Confirm no
         caller distinguishes them before changing.
 */
static int createListenSocket(const char *proto, uint16_t listen_port) {
	int listenfd = 0;

	if (NULL == proto) {
		rdlog(LOG_ERR,
		      "Can't create listen socket: No protocol given");
		return 0;
	}

	// NOTE(review): UDP sockets are created non-blocking, TCP ones are
	// left blocking -- presumably the TCP accept path handles blocking
	// mode itself; confirm this asymmetry is intended.
	if (0 == strcmp(N2KAFKA_UDP, proto)) {
		listenfd = socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	} else if (0 == strcmp(N2KAFKA_TCP, proto)) {
		listenfd = socket(AF_INET, SOCK_STREAM, 0);
	} else {
		rdlog(LOG_ERR, "Can't create socket: Unknown type");
		return 0;
	}

	if (listenfd == -1) {
		rdlog(LOG_ERR,
		      "Error creating socket: %s",
		      mystrerror(errno, errbuf, ERROR_BUFFER_SIZE));
		return 0;
	}

	// SO_REUSEADDR lets the service restart on the same port without
	// waiting for TIME_WAIT to expire; failure is only a warning.
	const int so_reuseaddr_value = 1;
	const int setsockopt_ret = setsockopt(listenfd,
					      SOL_SOCKET,
					      SO_REUSEADDR,
					      &so_reuseaddr_value,
					      sizeof(so_reuseaddr_value));
	if (setsockopt_ret < 0) {
		rdlog(LOG_WARNING,
		      "Error setting socket option: %s",
		      mystrerror(errno, errbuf, ERROR_BUFFER_SIZE));
	}

	struct sockaddr_in server_addr;
	memset(&server_addr, 0, sizeof(server_addr));
	server_addr.sin_family = AF_INET;
	// Listen on every local interface
	server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
	assert(listen_port > 0);
	server_addr.sin_port = htons(listen_port);

	const int bind_ret = bind(listenfd,
				  (struct sockaddr *)&server_addr,
				  sizeof(server_addr));
	if (bind_ret == -1) {
		rdlog(LOG_ERR,
		      "Error binding socket: %s",
		      mystrerror(errno, errbuf, ERROR_BUFFER_SIZE));
		close(listenfd);
		return -1;
	}

	// Only TCP sockets enter the listening state
	if (0 == strcmp(N2KAFKA_TCP, proto)) {
		const int listen_ret = listen(listenfd, SOMAXCONN);
		if (listen_ret == -1) {
			rdlog(LOG_ERR,
			      "Error in listen: %s",
			      mystrerror(errno, errbuf, ERROR_BUFFER_SIZE));
			close(listenfd);
			return -1;
		}
	}

	rdlog(LOG_INFO, "Listening socket created successfuly");
	return listenfd;
}
rb_message_array_t * print_monitor_value(const struct monitor_value *monitor_value, const rb_monitor_t *monitor) { // clang-format off const size_t ret_size = monitor_value->type == MONITOR_VALUE_T__VALUE ? 1 : monitor_value->array.children_count + (monitor_value->array.split_op_result ? 1 : 0); // clang-format on rb_message_array_t *ret = new_messages_array(ret_size); if (ret == NULL) { rdlog(LOG_ERR, "Couldn't allocate messages array"); return NULL; } if (monitor_value->type == MONITOR_VALUE_T__VALUE) { print_monitor_value0(&ret->msgs[0], monitor_value, monitor, NO_INSTANCE); } else { size_t i_msgs = 0; assert(monitor_value->type == MONITOR_VALUE_T__ARRAY); for (size_t i = 0; i < monitor_value->array.children_count; ++i) { if (monitor_value->array.children[i]) { print_monitor_value0( &ret->msgs[i_msgs++], monitor_value->array .children[i], monitor, i); } } if (monitor_value->array.split_op_result) { rb_message *msg = &ret->msgs[i_msgs++]; assert(NULL == msg->payload); print_monitor_value0( msg, monitor_value->array.split_op_result, monitor, NO_INSTANCE); } ret->count = i_msgs; } return ret; }
static void read_cb(struct ev_loop *loop, struct ev_io *watcher, int revents) { if(EV_ERROR & revents) { rdlog(LOG_ERR,"Read callback error: %s",mystrerror(errno,errbuf, ERROR_BUFFER_SIZE)); } struct connection_private *connection = (struct connection_private *) watcher->data; struct sockaddr_in6 saddr; #ifdef CONNECTION_PRIVATE_MAGIC assert(connection->magic == CONNECTION_PRIVATE_MAGIC); #endif char *buffer = calloc(READ_BUFFER_SIZE,sizeof(char)); const int recv_result = receive_from_socket(watcher->fd,&saddr,buffer,READ_BUFFER_SIZE); if(recv_result > 0){ process_data_received_from_socket(buffer,(size_t)recv_result,connection->client, connection->callback,connection->callback_opaque); }else if(recv_result < 0){ if(errno == EAGAIN){ rdbg("Socket not ready. re-trying"); free(buffer); return; }else{ rdlog(LOG_ERR,"Recv error: %s",mystrerror(errno,errbuf,ERROR_BUFFER_SIZE)); free(buffer); close_socket_and_stop_watcher(loop,watcher); return; } }else{ /* recv_result == 0 */ free(buffer); close_socket_and_stop_watcher(loop,watcher); return; } if(NULL!=global_config.response && !connection->first_response_sent){ int send_ret = 1; rdlog(LOG_DEBUG,"Sending first response..."); if(global_config.response_len == 0){ rdlog(LOG_ERR,"Can't send first response to %s: size of response == 0",connection->client); connection->first_response_sent = 1; } else { send_ret = send_to_socket(watcher->fd,global_config.response,(size_t)global_config.response_len-1); } if(send_ret <= 0){ rdlog(LOG_ERR,"Cannot send first response to %s socket: %s", connection->client, mystrerror(errno,errbuf,ERROR_BUFFER_SIZE)); close_socket_and_stop_watcher(loop,watcher); } rdlog(LOG_DEBUG,"first response ok"); connection->first_response_sent = 1; } }
/**
 * Parse the "zookeeper" config object and initialize the ZK handler.
 *
 * @param main_info   Main thread info; receives the created zk handle.
 * @param worker_info Worker info; supplies the work queue.
 * @param zk_config   JSON object with host/timeouts/sensors keys.
 */
static void parse_zookeeper_json(struct _main_info *main_info,
				 struct _worker_info *worker_info,
				 json_object *zk_config) {
	char *host = NULL;
	int64_t pop_watcher_timeout = 0, push_timeout = 0;
	json_object *zk_sensors = NULL;

	json_object_object_foreach(zk_config, key, val) {
		// Fix: errno must be cleared before each conversion,
		// otherwise a stale value from an earlier call is reported as
		// a parse error (parse_json_config already does this).
		errno = 0;
		if (0 == strcmp(key, "host")) {
			host = strdup(json_object_get_string(val));
		} else if (0 == strcmp(key, "pop_watcher_timeout")) {
			pop_watcher_timeout = json_object_get_int64(val);
		} else if (0 == strcmp(key, "push_timeout")) {
			push_timeout = json_object_get_int64(val);
		} else if (0 == strcmp(key, "sensors")) {
			zk_sensors = val;
		} else {
			rdlog(LOG_ERR,
			      "Don't know what zookeeper config.%s "
			      "key means.",
			      key);
		}

		if (errno != 0) {
			rdlog(LOG_ERR,
			      "Could not parse %s value: %s",
			      key,
			      strerror(errno));
			free(host); // fix: host leaked on this return
			return;
		}
	}

	if (!host) {
		rdlog(LOG_ERR, "No zookeeper host specified. Can't use ZK.");
		return;
	} else if (0 == push_timeout) {
		rdlog(LOG_INFO,
		      "No pop push_timeout specified. We will never "
		      "be ZK masters.");
		free(host); // fix: host leaked on this return
		return;
	} else if (push_timeout < 0) {
		rdlog(LOG_ERR,
		      "Can't set a zk push timeout < 0 (%" PRId64 ")",
		      push_timeout);
		free(host); // fix: host leaked on this return
		return;
	} else if (pop_watcher_timeout < 0) {
		rdlog(LOG_ERR,
		      "Can't set a zk pop timeout < 0 (%" PRId64 ")",
		      pop_watcher_timeout);
		// Fix: every other invalid-config branch returns; falling
		// through here cast the negative timeout to a huge uint64_t.
		free(host);
		return;
	}

	main_info->zk = init_rbmon_zk(host,
				      (uint64_t)pop_watcher_timeout,
				      (uint64_t)push_timeout,
				      zk_sensors,
				      worker_info->queue);
}
/**
 * Append an integer-typed enrichment key to the JSON output buffer.
 *
 * Logs an error (and appends nothing) when the value cannot be read
 * as an int64.
 */
static void print_monitor_value_enrichment_int(struct printbuf *buf,
					       const char *key,
					       json_object *val) {
	errno = 0;
	const int64_t integer = json_object_get_int64(val);
	if (errno != 0) {
		char errbuf[BUFSIZ];
		// NOTE: assumes the GNU strerror_r that returns char*;
		// the XSI variant returns int and would not compile here.
		const char *errstr = strerror_r(errno, errbuf, sizeof(errbuf));
		rdlog(LOG_ERR,
		      "Cannot extract int value of enrichment key %s: %s",
		      key,
		      errstr);
	} else {
		// Fix: "%ld" is not a valid conversion for int64_t on
		// platforms where long is 32-bit (undefined behavior);
		// PRId64 is the portable specifier, already used elsewhere
		// in this codebase.
		sprintbuf(buf, ",\"%s\":%" PRId64, key, integer);
	}
}
/**
 * Forward a received datagram/stream chunk to the decoder callback,
 * tagging it with the client IP.
 *
 * Takes ownership of buffer: either the callback consumes it or it is
 * freed here (stdout-only mode).
 */
static void process_data_received_from_socket(char *buffer,
					      const size_t recv_result,
					      const char *client,
					      decoder_callback callback,
					      void *callback_opaque) {
	if (unlikely(global_config.debug)) {
		rdlog(LOG_DEBUG,
		      "received %zu data from %s: %.*s",
		      recv_result,
		      client,
		      (int)recv_result,
		      buffer);
	}

	struct pair attrs_mem[1];
	attrs_mem[0].key = "client_ip";
	attrs_mem[0].value = client;

	keyval_list_t attrs = keyval_list_initializer(attrs);
	add_key_value_pair(&attrs, attrs_mem);

	if (unlikely(only_stdout_output())) {
		// Nobody consumes the buffer in stdout-only mode
		free(buffer);
	} else {
		callback(buffer, recv_result, &attrs, callback_opaque, NULL);
	}
}
/**
 Extracts client information in a uuid_entry
 @param organization_uuid Organization UUID (copied into the entry)
 @param organization_config Organization JSON config
 @return Generated uuid entry, or NULL on failure
 */
static organization_db_entry_t *create_organization_db_entry(
		const char *organization_uuid, json_t *organization_config) {
	assert(organization_uuid);
	assert(organization_config);

	organization_db_entry_t *entry = NULL;
	// NOTE(review): rd_calloc_struct presumably allocates the struct plus
	// a copy of organization_uuid in one block (-1 => use strlen) and
	// points uuid_entry.uuid at it -- confirm against its definition.
	rd_calloc_struct(&entry,
			 sizeof(*entry),
			 -1,
			 organization_uuid,
			 &entry->uuid_entry.uuid,
			 RD_MEM_END_TOKEN);

	if (NULL == entry) {
		rdlog(LOG_ERR,
		      "Couldn't create uuid %s entry (out of memory?).",
		      organization_uuid);
		goto err;
	}

#ifdef ORGANIZATION_DB_ENTRY_MAGIC
	entry->magic = ORGANIZATION_DB_ENTRY_MAGIC;
#endif

	uuid_entry_init(&entry->uuid_entry);
	pthread_mutex_init(&entry->mutex, NULL);
	// Caller owns the initial reference
	entry->refcnt = 1;
	entry->uuid_entry.data = entry;

	const int rc = update_organization(entry, organization_config);
	if (rc != 0) {
		goto err_update;
	}

	return entry;

err_update:
	// Releases the reference taken above, destroying the entry
	organizations_db_entry_decref(entry);
	entry = NULL;
err:
	return entry;
}
/**
 * Build a new array containing the elements of array at the positions
 * listed in pos (-1 terminated).
 *
 * @return New array (caller frees), or NULL on bad arguments / OOM.
 */
rb_monitor_value_array_t *
rb_monitor_value_array_select(rb_monitor_value_array_t *array, ssize_t *pos) {
	if (!array || !pos) {
		return NULL;
	}

	rb_monitor_value_array_t *selected =
			rb_monitor_value_array_new(pos_array_length(pos));
	if (!selected) {
		rdlog(LOG_ERR, "Couldn't allocate select return (OOM?)");
		return NULL;
	}

	// pos is terminated by -1
	for (size_t i = 0; pos[i] != -1; ++i) {
		rb_monitor_value_array_add(selected, array->elms[pos[i]]);
	}

	return selected;
}
/**
 Creates a new topic handler using global configuration
 @param topic_name Topic name
 @param partitioner Partitioner function
 @param err Buffer where the error reason is written on failure
 @param errsize Size of err
 @return New topic handler
 */
rd_kafka_topic_t *new_rkt_global_config(const char *topic_name,
		rb_rd_kafka_partitioner_t partitioner,char *err,size_t errsize) {
	rd_kafka_topic_conf_t *my_rkt_conf =
			rd_kafka_topic_conf_dup(global_config.kafka_topic_conf);

	if (NULL == my_rkt_conf) {
		rdlog(LOG_ERR,
		      "Couldn't topic_conf_dup in topic %s",
		      topic_name);
		return NULL;
	}

	rd_kafka_topic_conf_set_partitioner_cb(my_rkt_conf, partitioner);

	rd_kafka_topic_t *ret = rd_kafka_topic_new(
			global_config.rk, topic_name, my_rkt_conf);
	if (NULL == ret) {
		// On failure the topic did not take ownership of the conf,
		// so it must be destroyed here.
		strerror_r(errno, err, errsize);
		rd_kafka_topic_conf_destroy(my_rkt_conf);
	}

	return ret;
}
/**
 * Emit a single jansson value through a yajl generator, recursing into
 * objects and arrays.
 *
 * @return 1 on success, 0 when a string value could not be extracted.
 */
static int gen_jansson_value(yajl_gen gen, json_t *value) {
	const int type = json_typeof(value);

	switch (type) {
	case JSON_OBJECT:
		yajl_gen_map_open(gen);
		gen_jansson_object(gen, value);
		yajl_gen_map_close(gen);
		break;

	case JSON_ARRAY:
		yajl_gen_array_open(gen);
		gen_jansson_array(gen, value);
		yajl_gen_array_close(gen);
		break;

	case JSON_STRING: {
		json_error_t jerr;
		const char *str = NULL;
		size_t len = 0;
		// "s%" unpacks the string together with its length,
		// so embedded NULs survive
		const int rc = json_unpack_ex(
				value, &jerr, 0, "s%", &str, &len);
		if (rc != 0) {
			rdlog(LOG_ERR,
			      "Couldn't extract string: %s",
			      jerr.text);
			return 0;
		}
		yajl_gen_string(gen, (const unsigned char *)str, len);
		break;
	}

	case JSON_INTEGER:
		yajl_gen_integer(gen, json_integer_value(value));
		break;

	case JSON_REAL:
		yajl_gen_double(gen, json_number_value(value));
		break;

	case JSON_TRUE:
		yajl_gen_bool(gen, 1);
		break;

	case JSON_FALSE:
		yajl_gen_bool(gen, 0);
		break;

	case JSON_NULL:
		yajl_gen_null(gen);
		break;

	default:
		rdlog(LOG_ERR, "Unkown jansson type %d", type);
		break;
	}

	return 1;
}
/**
 Parse a JSON monitor
 @param type Type of monitor (oid, system, op...)
 @param cmd_arg Argument of monitor (desired oid, system command,
 operation...)
 @param json_monitor JSON object describing the monitor
 @param sensor_enrichment Sensor-level enrichment to copy into the monitor
 @return New monitor
 */
static rb_monitor_t *parse_rb_monitor0(enum monitor_cmd_type type,
				       const char *cmd_arg,
				       json_object *json_monitor,
				       json_object *sensor_enrichment) {
	assert(cmd_arg);
	assert(json_monitor);
	assert(sensor_enrichment);

	// "name" is the only mandatory field
	char *aux_name = PARSE_CJSON_CHILD_DUP_STR(json_monitor, "name", NULL);
	if (NULL == aux_name) {
		rdlog(LOG_ERR, "Monitor with no name");
		return NULL;
	}

	char *aux_split_op = PARSE_CJSON_CHILD_DUP_STR(
			json_monitor, "split_op", NULL);
	char *unit = PARSE_CJSON_CHILD_DUP_STR(json_monitor, "unit", NULL);
	char *group_name = PARSE_CJSON_CHILD_DUP_STR(
			json_monitor, "group_name", NULL);
	/// @todo change to true/false
	int aux_timestamp_given = PARSE_CJSON_CHILD_INT64(
			json_monitor, "timestamp_given", 0);

	// An unknown split operation is discarded with a warning
	if (aux_split_op && !valid_split_op(aux_split_op)) {
		rdlog(LOG_WARNING,
		      "Invalid split op %s of monitor %s",
		      aux_split_op,
		      aux_name);
		free(aux_split_op);
		aux_split_op = NULL;
	}

	// Op monitors compute their value, so an external timestamp
	// makes no sense
	if (type == RB_MONITOR_T__OP && aux_timestamp_given) {
		rdlog(LOG_WARNING,
		      "Can't provide timestamp in op monitor (%s)",
		      aux_name);
		aux_timestamp_given = 0;
	}

	/// tmp monitor to locate all string parameters
	rb_monitor_t *ret = calloc(1, sizeof(*ret));
	if (NULL == ret) {
		rdlog(LOG_ERR, "Can't alloc sensor monitor (out of memory?)");
		// NOTE(review): group_name is not freed on this path --
		// possible leak; confirm and fix alongside unit/aux_name.
		free(aux_name);
		free(aux_split_op);
		free(unit);
		return NULL;
	}

#ifdef RB_MONITOR_MAGIC
	ret->magic = RB_MONITOR_MAGIC;
#endif

	// Ownership of the dup'd strings transfers to ret here
	ret->splittok = PARSE_CJSON_CHILD_DUP_STR(json_monitor, "split", NULL);
	ret->splitop = aux_split_op;
	ret->name = aux_name;
	ret->name_split_suffix = PARSE_CJSON_CHILD_DUP_STR(
			json_monitor, "name_split_suffix", NULL);
	ret->instance_prefix = PARSE_CJSON_CHILD_DUP_STR(
			json_monitor, "instance_prefix", NULL);
	ret->group_id = PARSE_CJSON_CHILD_DUP_STR(
			json_monitor, "group_id", NULL);
	ret->timestamp_given = aux_timestamp_given;
	ret->send = PARSE_CJSON_CHILD_INT64(json_monitor, "send", 1);
	ret->integer = PARSE_CJSON_CHILD_INT64(json_monitor, "integer", 0);
	ret->type = type;
	ret->cmd_arg = strdup(cmd_arg);
	ret->enrichment = json_object_object_copy(sensor_enrichment);
	if (NULL == ret->enrichment) {
		rdlog(LOG_CRIT, "Couldn't allocate monitor enrichment (OOM?)");
		rb_monitor_done(ret);
		ret = NULL;
		goto err;
	}

// Builds a {key,val} pair only when mval is non-NULL
#define RB_MONITOR_ENRICHMENT_STR(mkey, mval)                                  \
	{                                                                      \
		.key = mval ? mkey : NULL,                                     \
		.val = mval ? json_object_new_string(mval) : NULL,             \
	}

	// clang-format off
	const struct {
		const char *key;
		json_object *val;
	} enrichment_add[] = {
		{
			.key = "type",
			.val = json_object_new_string(rb_monitor_type(ret)),
		},
/**
 Parses the main "config" JSON object into worker/main thread settings.
 @param config JSON config object
 @param worker_info Worker configuration to fill
 @param main_info Main thread configuration to fill
 @return TRUE on success, FALSE if some value failed to parse
 */
static json_bool parse_json_config(json_object *config,
				   struct _worker_info *worker_info,
				   struct _main_info *main_info) {
	int ret = TRUE;

	json_object_object_foreach(config, key, val) {
		// errno is cleared per-key so each conversion's failure can
		// be detected at the bottom of the loop
		errno = 0;
		if (0 == strcmp(key, "debug")) {
			rd_log_set_severity(json_object_get_int64(val));
		} else if (0 == strcmp(key, "stdout")) {
#if 0
			/// @TODO recover
			if(json_object_get_int64(val))
				worker_info->debug_output_flags |= DEBUG_STDOUT;
			else
				worker_info->debug_output_flags &= ~DEBUG_STDOUT;
#endif
		} else if (0 == strcmp(key, "syslog")) {
#if 0
			/// @TODO recover
			if(json_object_get_int64(val))
				worker_info->debug_output_flags |= DEBUG_SYSLOG;
			else
				worker_info->debug_output_flags &= ~DEBUG_SYSLOG;
#endif
		} else if (0 == strcmp(key, "threads")) {
			int64_t threads = json_object_get_int64(val);
			if (threads <= 0) {
				rdlog(LOG_WARNING,
				      "Can't use %" PRId64 " threads",
				      threads);
			} else {
				main_info->threads = (uint64_t)threads;
			}
		} else if (0 == strcmp(key, "timeout")) {
			worker_info->timeout = json_object_get_int64(val);
		} else if (0 == strcmp(key, "max_snmp_fails")) {
			worker_info->max_snmp_fails =
					json_object_get_int64(val);
		} else if (0 == strcmp(key, "max_kafka_fails")) {
			// NOTE(review): read as a string while the sibling
			// max_snmp_fails uses get_int64 -- confirm the field
			// really is a string.
			worker_info->max_kafka_fails =
					json_object_get_string(val);
		} else if (0 == strcmp(key, "sleep_main")) {
			int64_t sleep_s = json_object_get_int64(val);
			if (sleep_s <= 0) {
				rdlog(LOG_WARNING,
				      "Can't sleep for %" PRId64 "\"",
				      sleep_s);
			} else {
				main_info->sleep_main = (uint64_t)sleep_s;
			}
		} else if (0 == strcmp(key, "kafka_broker")) {
			worker_info->kafka_broker =
					json_object_get_string(val);
		} else if (0 == strcmp(key, "kafka_topic")) {
			worker_info->kafka_topic = json_object_get_string(val);
		} else if (0 == strcmp(key, "kafka_timeout")) {
			worker_info->kafka_timeout =
					json_object_get_int64(val);
		} else if (0 == strcmp(key, "sleep_worker")) {
			worker_info->sleep_worker =
					json_object_get_int64(val);
		} else if (0 == strcmp(key, CONFIG_RDKAFKA_KEY)) {
			parse_rdkafka_config_json(worker_info, key, val);
#ifdef HAVE_RBHTTP
		} else if (0 == strcmp(key, "http_endpoint")) {
			worker_info->http_endpoint =
					json_object_get_string(val);
		} else if (0 == strcmp(key, "http_max_total_connections")) {
			worker_info->http_max_total_connections =
					json_object_get_int64(val);
		} else if (0 == strcmp(key, "http_timeout")) {
			worker_info->http_timeout =
					json_object_get_int64(val);
		} else if (0 == strcmp(key, "http_connttimeout")) {
			worker_info->http_connttimeout =
					json_object_get_int64(val);
		} else if (0 == strcmp(key, "http_verbose")) {
			worker_info->http_verbose =
					json_object_get_int64(val);
		} else if (0 == strcmp(key, "http_insecure")) {
			worker_info->http_insecure =
					json_object_get_int64(val);
		} else if (0 == strcmp(key, "rb_http_max_messages")) {
			worker_info->rb_http_max_messages =
					json_object_get_int64(val);
		} else if (0 == strcmp(key, "rb_http_mode")) {
			const char *sval = json_object_get_string(val);
			if (!sval) {
				rdlog(LOG_ERR, "Invalid rb_http_mode");
			} else if (0 == strcmp(sval, "normal")) {
				worker_info->http_mode = RB_HTTP_NORMAL_MODE;
			} else if (0 == strcmp(sval, "deflated")) {
				worker_info->http_mode = CHUNKED_MODE;
			} else {
				rdlog(LOG_ERR,
				      "Invalid rb_http_mode %s",
				      sval);
			}
#else
		// Without librbhttp support every http_* key is an error
		} else if (0 == strncmp(key, "http_", strlen("http_"))) {
			rdlog(LOG_ERR,
			      "rb_monitor does not have librbhttp support, so"
			      " %s key is invalid. Please compile it with %s",
			      key,
			      ENABLE_RBHTTP_CONFIGURE_OPT);
#endif
		} else if (0 == strcmp(key, "snmp_traps")) {
			struct json_object *jserver_name = NULL;
			json_object_object_get_ex(
					val, "server_name", &jserver_name);
			if (NULL == jserver_name) {
				rdlog(LOG_ERR,
				      "snmp traps object with no server name");
				continue;
			}
			main_info->snmp_traps.handler.server_name =
					json_object_get_string(jserver_name);
			if (NULL == main_info->snmp_traps.handler
							.server_name) {
				rdlog(LOG_ERR,
				      "Couldn't extract JSON server name (bad "
				      "type?)");
			}
		} else {
			rdlog(LOG_ERR,
			      "Don't know what config.%s key means.",
			      key);
		}

		if (errno != 0) {
			rdlog(LOG_ERR,
			      "Could not parse %s value: %s",
			      key,
			      strerror(errno));
			ret = FALSE;
		}
	}
/**
 * Initialize the listen-socket mutex.
 *
 * @param mutex Mutex to initialize
 * @return pthread_mutex_init result (0 on success)
 */
static int createListenSocketMutex(pthread_mutex_t *mutex) {
	const int init_returned = pthread_mutex_init(mutex, NULL);
	if (init_returned != 0) {
		// Fix: the message previously ended at the colon with no
		// reason attached. pthread functions return the error code
		// directly (they do not set errno), so it is passed to
		// mystrerror as the rest of this file does for errno.
		rdlog(LOG_ERR,
		      "Error creating mutex: %s",
		      mystrerror(init_returned, errbuf, ERROR_BUFFER_SIZE));
	}
	return init_returned;
}
/**
 * Log the peer address of a newly accepted connection.
 *
 * @param sa Peer address of the accepted socket
 */
static void print_accepted_connection_log(const struct sockaddr_in *sa) {
	// Fix: the buffer was declared "char str[sizeof(INET_ADDRSTRLEN)]",
	// i.e. sizeof(int) == 4 bytes, while inet_ntop was told it had
	// INET_ADDRSTRLEN (16) bytes -- a stack buffer overflow for any
	// dotted-quad longer than 3 characters. INET_ADDRSTRLEN is already
	// a character count, not a type.
	char str[INET_ADDRSTRLEN];
	inet_ntop(AF_INET, &(sa->sin_addr), str, sizeof(str));
	rdlog(LOG_INFO, "Accepted connection from %s:%d", str, get_port(sa));
}