/*
 * Look up "key" in the attribute string "attrs" and parse it as a boolean.
 * *value is preset to FALSE so callers see a defined result even when the
 * key is absent or unparsable. Returns the status of read_cfg_bool.
 */
int16 AttrGetBoolean( StringPtr attrs, StringPtr key, boolean *value )
{
    uint8 buf[32] = "";

    *value = FALSE;
    parse_items( attrs, key, buf );
    return read_cfg_bool( buf, value );
}
/*
 * Parse the log section named "key" from JSON config "root" into *cfg.
 *
 * Fills cfg->path, cfg->flag, the cfg->shift flag word (rotation mode plus
 * DLOG_LOG_PID / DLOG_USE_FORK), and the max/num/keep limits.
 * Returns 0 on success, a negative line-tagged error code on failure.
 */
int load_cfg_log(json_t *root, const char *key, log_cfg *cfg)
{
    json_t *node = json_object_get(root, key);
    if (!node || !json_is_object(node))
        return -__LINE__;

    ERR_RET(read_cfg_str(node, "path", &cfg->path, NULL));
    ERR_RET(read_cfg_str(node, "flag", &cfg->flag, NULL));

    cfg->shift = 0;
    char *shift;
    ERR_RET(read_cfg_str(node, "shift", &shift, "size"));
    strtolower(shift);
    if (strcmp(shift, "size") == 0) {
        cfg->shift |= DLOG_SHIFT_BY_SIZE;
    } else if (strcmp(shift, "hour") == 0) {
        cfg->shift |= DLOG_SHIFT_BY_HOUR;
    } else if (strcmp(shift, "min") == 0) {
        cfg->shift |= DLOG_SHIFT_BY_MIN;
    } else {
        /* anything else (including the "day" spelling) falls back to daily */
        cfg->shift |= DLOG_SHIFT_BY_DAY;
    }
    /* read_cfg_str hands back a writable heap copy (strtolower mutates it,
     * which would be UB on the literal default), so we own it: fix the leak. */
    free(shift);

    bool is_pid;
    bool is_fork;
    ERR_RET(read_cfg_bool(node, "pid", &is_pid, false, true));
    ERR_RET(read_cfg_bool(node, "fork", &is_fork, false, true));
    if (is_pid) {
        cfg->shift |= DLOG_LOG_PID;
    }
    if (is_fork) {
        cfg->shift |= DLOG_USE_FORK;
    }

    ERR_RET(read_cfg_int(node, "max", &cfg->max, false, 100 * 1000 * 1000));
    ERR_RET(read_cfg_int(node, "num", &cfg->num, false, 100));
    ERR_RET(read_cfg_int(node, "keep", &cfg->keep, false, 7));

    return 0;
}
/*
 * Load all top-level settings from the parsed JSON config "root" into the
 * global `settings` struct: debug flag, process/log/alert/svr sections,
 * kafka "deals" consumer, redis sentinel, history depth limits, cache
 * timeout, and the local timezone offset.
 * Returns 0 on success, a negative line-tagged error code on failure.
 */
static int read_config_from_json(json_t *root)
{
    int ret;
    ret = read_cfg_bool(root, "debug", &settings.debug, false, false);
    if (ret < 0) {
        printf("read debug config fail: %d\n", ret);
        return -__LINE__;
    }
    ret = load_cfg_process(root, "process", &settings.process);
    if (ret < 0) {
        printf("load process config fail: %d\n", ret);
        return -__LINE__;
    }
    ret = load_cfg_log(root, "log", &settings.log);
    if (ret < 0) {
        printf("load log config fail: %d\n", ret);
        return -__LINE__;
    }
    ret = load_cfg_alert(root, "alert", &settings.alert);
    if (ret < 0) {
        printf("load alert config fail: %d\n", ret);
        return -__LINE__;
    }
    ret = load_cfg_rpc_svr(root, "svr", &settings.svr);
    if (ret < 0) {
        printf("load svr config fail: %d\n", ret);
        return -__LINE__;
    }
    ret = load_cfg_kafka_consumer(root, "deals", &settings.deals);
    if (ret < 0) {
        printf("load kafka deals config fail: %d\n", ret);
        return -__LINE__;
    }
    ret = load_cfg_redis_sentinel(root, "redis", &settings.redis);
    if (ret < 0) {
        /* was "load kafka deals config fail" — copy-paste error fixed */
        printf("load redis config fail: %d\n", ret);
        return -__LINE__;
    }

    ERR_RET_LN(read_cfg_int(root, "sec_max", &settings.sec_max, false, 86400 * 7));
    ERR_RET_LN(read_cfg_int(root, "min_max", &settings.min_max, false, 60 * 24 * 365));
    ERR_RET_LN(read_cfg_int(root, "hour_max", &settings.hour_max, false, 24 * 365 * 10));
    ERR_RET_LN(read_cfg_real(root, "cache_timeout", &settings.cache_timeout, false, 0.45));

    settings.timezone = get_timezone_offset();

    return 0;
}
/*
 * Parse the RPC server section named "key" from JSON config "root" into *cfg.
 *
 * "bind" may be a single address string or a non-empty array of them; each
 * entry is parsed with nw_sock_cfg_parse into cfg->bind_arr (heap-allocated,
 * owned by cfg). Also reads packet/buffer limits and the heartbeat flag.
 * Returns 0 on success, a negative line-tagged error code on failure; on
 * failure cfg->bind_arr is freed and reset so nothing leaks.
 */
int load_cfg_rpc_svr(json_t *root, const char *key, rpc_svr_cfg *cfg)
{
    json_t *node = json_object_get(root, key);
    if (!node || !json_is_object(node))
        return -__LINE__;

    json_t *bind = json_object_get(node, "bind");
    if (!bind)
        return -__LINE__;

    if (json_is_string(bind)) {
        cfg->bind_count = 1;
        cfg->bind_arr = malloc(sizeof(nw_svr_bind));
        if (cfg->bind_arr == NULL) /* was dereferenced unchecked: UB on OOM */
            return -__LINE__;
        if (nw_sock_cfg_parse(json_string_value(bind), &cfg->bind_arr[0].addr, &cfg->bind_arr[0].sock_type) < 0) {
            free(cfg->bind_arr);
            cfg->bind_arr = NULL;
            return -__LINE__;
        }
    } else if (json_is_array(bind)) {
        cfg->bind_count = json_array_size(bind);
        if (cfg->bind_count == 0)
            return -__LINE__;
        cfg->bind_arr = malloc(sizeof(nw_svr_bind) * cfg->bind_count);
        if (cfg->bind_arr == NULL) /* was dereferenced unchecked: UB on OOM */
            return -__LINE__;
        for (uint32_t i = 0; i < cfg->bind_count; ++i) {
            json_t *row = json_array_get(bind, i);
            if (!json_is_string(row)) {
                free(cfg->bind_arr); /* was leaked on this error path */
                cfg->bind_arr = NULL;
                return -__LINE__;
            }
            if (nw_sock_cfg_parse(json_string_value(row), &cfg->bind_arr[i].addr, &cfg->bind_arr[i].sock_type) < 0) {
                free(cfg->bind_arr); /* was leaked on this error path */
                cfg->bind_arr = NULL;
                return -__LINE__;
            }
        }
    } else {
        return -__LINE__;
    }

    ERR_RET(read_cfg_uint32(node, "max_pkg_size", &cfg->max_pkg_size, true, 0));
    ERR_RET(read_cfg_uint32(node, "buf_limit", &cfg->buf_limit, false, 0));
    ERR_RET(read_cfg_uint32(node, "read_mem", &cfg->read_mem, false, 0));
    ERR_RET(read_cfg_uint32(node, "write_mem", &cfg->write_mem, false, 0));
    ERR_RET(read_cfg_bool(node, "heartbeat_check", &cfg->heartbeat_check, false, true));

    return 0;
}