/*
 * Example JSON-RPC client: connects, sends a "foo" request built from nested
 * JSON objects, prints the reply and shuts the connection down.
 */
int main(void)
{
	struct jrpc_client *client = &my_client;
	struct json *outer, *inner, *args, *reply;

	int rc = jrpc_client_init(client, HOST, PORT);
	if (rc != 0) {
		exit(rc);
	}

	/* Build [{"A":3, "B":10, "S":{"A":1, "B":2}}] as the call arguments. */
	outer = json_create_object();
	json_add_number_to_object(outer, "A", 3);
	json_add_number_to_object(outer, "B", 10);

	inner = json_create_object();
	json_add_number_to_object(inner, "A", 1);
	json_add_number_to_object(inner, "B", 2);

	args = json_create_array();
	json_add_item_to_object(outer, "S", inner);
	json_add_item_to_array(args, outer);

	/* jrpc_client_call takes ownership of args and frees it. */
	rc = jrpc_client_call(client, "foo", args, &reply);
	if (rc != 0) {
		exit(rc);
	}

	printf("%s\n", json_to_string(reply));
	json_delete(reply);
	jrpc_client_close(client);
	return 0;
}
/*
 * Build and send a JSON-RPC error reply: {"error":{"code":..,"message":..},"id":..}.
 * Takes ownership of `message` (freed here) and of `id`, which is attached to
 * the reply object and released together with it by json_delete().
 */
static int send_error(struct jrpc_connection *conn, int code, char *message, struct json *id) {
	struct json *reply = json_create_object();
	struct json *err = json_create_object();

	json_add_number_to_object(err, "code", code);
	json_add_string_to_object(err, "message", message);
	json_add_item_to_object(reply, "error", err);
	json_add_item_to_object(reply, "id", id);

	int rc = send_response(conn, json_to_string_unformatted(reply));
	json_delete(reply);
	free(message);
	return rc;
}
// 如果添加失败 返回 NULL // 如果添加成功,返回所添加的 json_object_t 结构指针 json_object_t * json_append(json_object_t * obj, const char * key, size_t key_len, json_type_t value_type, void * value, size_t value_len) { assert(obj); assert( (obj->object_type == JSON_OBJECT && key) || (obj->object_type != JSON_OBJECT) ); while( obj->value_type != JSON_NONE ) { if( !obj->next ) { if( obj->object_type == JSON_OBJECT ) { obj->next = json_create_object(); } else if ( obj->object_type == JSON_ARRAY ) { obj->next = json_create_array(); } else { return NULL; } } if( obj->next ) { obj = obj->next; } else { return NULL; } } if( obj->object_type == JSON_OBJECT ) { #ifdef __x86_64__ assert( !(key_len >> 18) ); #elif __i386__ assert( !(key_len >> 8) ); #endif obj->key = (char *)key; obj->key_len = key_len; }
/*
 * One-time JSON layer setup: creates the global JSON context and registers the
 * layout of struct game_message so it can be (de)serialized by field offset.
 */
void InitJSON( void )
{
    gsg.json_context = json_create_context();
    gsg.base_message = json_create_object( gsg.json_context, sizeof( game_message ) );
    /* "MsgID" maps to game_message.ID as a 32-bit integer. */
    json_add_object_member( gsg.base_message, "MsgID", offsetof( struct game_message, ID ), JSON_Element_Integer_32, 0 );
    /* NULL member name with JSON_Element_Raw_Object — presumably the .extra
     * field receives the remaining raw JSON payload; confirm against the
     * json_add_object_member contract. */
    json_add_object_member( gsg.base_message, NULL, offsetof( struct game_message, extra ), JSON_Element_Raw_Object, 0 );
}
/*
 * Report CPU-idleness profiling results, either as plain text written via
 * log_buf() (FIO_OUTPUT_NORMAL) or as a "cpu_idleness" JSON object attached
 * to `parent` (FIO_OUTPUT_JSON). Releases the profiler's dynamic allocations
 * via fio_idle_prof_cleanup() once the stats have been emitted.
 */
void show_idle_prof_stats(int output, struct json_object *parent, struct buf_output *out)
{
	int i, nr_cpus = ipc.nr_cpus;
	struct json_object *tmp;
	char s[MAX_CPU_STR_LEN];

	if (output == FIO_OUTPUT_NORMAL) {
		/* The leading newline is only wanted above the calibration-only header. */
		if (ipc.opt > IDLE_PROF_OPT_CALI)
			log_buf(out, "\nCPU idleness:\n");
		else if (ipc.opt == IDLE_PROF_OPT_CALI)
			log_buf(out, "CPU idleness:\n");

		if (ipc.opt >= IDLE_PROF_OPT_SYSTEM)
			log_buf(out, " system: %3.2f%%\n", fio_idle_prof_cpu_stat(-1));

		/* Per-CPU mode: cpu 0 first, then the rest comma-separated. */
		if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
			log_buf(out, " percpu: %3.2f%%", fio_idle_prof_cpu_stat(0));
			for (i = 1; i < nr_cpus; i++)
				log_buf(out, ", %3.2f%%", fio_idle_prof_cpu_stat(i));
			log_buf(out, "\n");
		}

		if (ipc.opt >= IDLE_PROF_OPT_CALI) {
			log_buf(out, " unit work: mean=%3.2fus,", ipc.cali_mean);
			log_buf(out, " stddev=%3.2f\n", ipc.cali_stddev);
		}

		/* dynamic mem allocations can now be freed */
		if (ipc.opt != IDLE_PROF_OPT_NONE)
			fio_idle_prof_cleanup();

		return;
	}

	if ((ipc.opt != IDLE_PROF_OPT_NONE) && (output & FIO_OUTPUT_JSON)) {
		if (!parent)
			return;

		tmp = json_create_object();
		if (!tmp)
			return;

		/* tmp is owned by parent from here on. */
		json_object_add_value_object(parent, "cpu_idleness", tmp);
		json_object_add_value_float(tmp, "system", fio_idle_prof_cpu_stat(-1));

		if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
			for (i = 0; i < nr_cpus; i++) {
				snprintf(s, MAX_CPU_STR_LEN, "cpu-%d", i);
				json_object_add_value_float(tmp, s, fio_idle_prof_cpu_stat(i));
			}
		}

		json_object_add_value_float(tmp, "unit_mean", ipc.cali_mean);
		json_object_add_value_float(tmp, "unit_stddev", ipc.cali_stddev);

		fio_idle_prof_cleanup();
	}
}
/*
 * Wrap `result` in a JSON-RPC response envelope ({"result":..,"id":..}) and
 * send it over the connection. A NULL result simply omits the "result"
 * member. Both `result` and `id` are attached to the envelope and released
 * with it by json_delete().
 */
static int send_result(struct jrpc_connection *conn, struct json *result, struct json *id) {
	struct json *envelope = json_create_object();

	if (result)
		json_add_item_to_object(envelope, "result", result);
	json_add_item_to_object(envelope, "id", id);

	int rc = send_response(conn, json_to_string_unformatted(envelope));
	json_delete(envelope);
	return rc;
}
/*
 * JSON-RPC handler for the "foo" method.
 *
 * Expects the first element of the `params` array to be an object with
 * integer members "A" and "B". Replies with an object containing an "Args"
 * array of A sub-objects ({"A":i, "B":B+i}) and a "Str" echo of the inputs.
 *
 * Returns the reply object (owned by the caller), or NULL when the required
 * fields are missing.
 */
static struct json *foo(struct jrpc_context * ctx, struct json *params, struct json *id)
{
	struct json *reply, *item, *array, *a_obj, *b_obj;
	int a, b, i;
	char buf[1024];

	json_dump(params);
	json_dump(id);

	/* params is an array; its first child carries the argument object. */
	a_obj = json_get_object_item(params->child, "A");
	b_obj = json_get_object_item(params->child, "B");
	if (a_obj == NULL || b_obj == NULL) {
		/* FIX: a malformed request used to crash on a NULL dereference. */
		return NULL;
	}
	a = a_obj->valueint;
	b = b_obj->valueint;
	/* snprintf instead of sprintf: bound the write even if formats change. */
	snprintf(buf, sizeof(buf), "recv a:%d b:%d", a, b);

	array = json_create_array();
	for (i = 0; i < a; i++) {
		item = json_create_object();
		json_add_number_to_object(item, "A", i);
		json_add_number_to_object(item, "B", b++);
		json_add_item_to_array(array, item);
	}

	reply = json_create_object();
	json_add_item_to_object(reply, "Args", array);
	json_add_string_to_object(reply, "Str", buf);
	json_dump(reply);
	return reply;
}
int jrpc_client_call(struct jrpc_client *client, const char *method, struct json *params, struct json **response) { int fd, max_read_size; size_t bytes_read = 0; char *new_buffer, *end_ptr = NULL; struct jrpc_connection *conn; struct json *root, *request; request = json_create_object(); json_add_string_to_object(request, "method", method); json_add_item_to_object(request, "params", params); json_add_number_to_object(request, "id", client->id); send_request(&client->conn, json_to_string(request)); json_delete(request); // read conn = &client->conn; fd = conn->sock.fd; for (;;) { if (conn->pos == (conn->buffer_size - 1)) { conn->buffer_size *= 2; new_buffer = realloc(conn->buffer, conn->buffer_size); if (new_buffer == NULL) { perror("Memory error"); return -ENOMEM; } conn->buffer = new_buffer; memset(conn->buffer + conn->pos, 0, conn->buffer_size - conn->pos); } // can not fill the entire buffer, string must be NULL terminated max_read_size = conn->buffer_size - conn->pos - 1; if ((bytes_read = read(fd, conn->buffer + conn->pos, max_read_size)) == -1) { elog("read %d\n", strerror(errno)); return -EIO; } if (!bytes_read) { // client closed the sending half of the connection if (client->conn.debug_level) dlog("Client closed connection.\n"); return -EIO; } conn->pos += bytes_read; if ((root = json_parse_stream(conn->buffer, &end_ptr)) != NULL) { if (client->conn.debug_level > 1) { dlog("Valid JSON Received:\n%s\n", json_to_string(root)); } if (root->type == JSON_T_OBJECT) { struct json *id = json_get_object_item(root, "id"); if (id->type == JSON_T_STRING) { if (client->id != atoi(id->string)) goto out; } else if (id->type == JSON_T_NUMBER) { if (client->id != id->valueint) goto out; } client->id++; //shift processed request, discarding it memmove(conn->buffer, end_ptr, strlen(end_ptr) + 2); conn->pos = strlen(end_ptr); memset(conn->buffer + conn->pos, 0, conn->buffer_size - conn->pos - 1); *response = json_detach_item_from_object(root, "result"); if (*response == NULL) 
goto out; json_delete(root); return 0; } out: elog("INVALID JSON Received:\n---\n%s\n---\n", conn->buffer); json_delete(root); return -EINVAL; } else if (end_ptr != (conn->buffer + conn->pos)) { // did we parse the all buffer? If so, just wait for more. // else there was an error before the buffer's end if (client->conn.debug_level) { elog("INVALID JSON Received:\n---\n%s\n---\n", conn->buffer); } send_error(conn, JRPC_PARSE_ERROR, strdup("Parse error. Invalid JSON" " was received by the client."), NULL); return -EINVAL; } } }
/*
 * Parse a JSON object from task->str starting at task->count (the opening
 * '{' has already been consumed). Runs a small state machine over
 * key -> ':' -> value -> ',' / '}'.
 *
 * In callback mode (task->callback set) each parsed member is handed to the
 * callback; otherwise members are appended to an in-memory tree via
 * json_append().
 *
 * Returns 0 on success, -1 on error with task->err_msg set.
 */
int json_parse_object( json_task_t *task, json_object_t *parent )
{
    char ch;
    json_object_t node, * append = NULL;

    /* `node` is a scratch slot describing the member currently being parsed. */
    node.next = parent;
    node.key = NULL;
    node.key_len = 0;

    if( !task->callback ) {
        /* Tree-building mode: allocate the container this object fills —
         * the root for the outermost object, parent's value otherwise. */
        if( !task->root ) {
            append = task->root = json_create_object();
        } else {
            append = parent->value.p = json_create_object();
        }
    }

    task->status = STS_OBJECT_START;
    while(( ch = *(task->str + task->count) )) {
        task->count ++;
        /* Skip insignificant whitespace between tokens. */
        if( ch == ' ' || ch == '\n' || ch == '\t' ) {
            continue;
        }
        switch( task->status ) {
        case STS_OBJECT_START:
            if( ch == '"' ) {
                /* Key starts right after the opening quote. */
                node.key = task->str + task->count;
                if( json_parse_string( task ) != 0 ) {
                    return -1;
                }
                // WARNING: key_len may overflow (narrow storage field)
                node.key_len = task->str + task->count - node.key - 1;
                task->status = STS_OBJECT_COLON;
            } else if( ch == '}' ) {
                // empty object {} — nothing to do
                return 0;
            } else {
                task->err_msg = "expect '\"' or '}'";
                return -1;
            }
            break;
        case STS_OBJECT_COLON:
            if( ch == ':' ) {
                task->status = STS_OBJECT_VALUE;
            } else {
                task->err_msg = "expect ':'";
                return -1;
            }
            break;
        case STS_OBJECT_VALUE:
            /* Step the cursor back one char: json_parse_value needs this
             * first character to decide the value type. */
            task->count --;
            node.value.s = task->str + task->count;
            if( json_parse_value( task, &node ) != 0 ) {
                /* Nested containers may have been partially built; free them. */
                if( node.value_type == JSON_OBJECT || node.value_type == JSON_ARRAY ) {
                    json_delete_object(node.value.p);
                }
                return -1;
            }
            // WARNING: value_len may overflow (narrow storage field)
            node.value_len = task->str + task->count - node.value.s;
            task->status = STS_OBJECT_COMMA;
            break;
        case STS_OBJECT_COMMA:
            if( ( ch == ',' || ch == '}' ) ) {
                /* Member complete — post-process the raw value text. */
                switch(node.value_type) {
                case JSON_STRING:
                    // strip the surrounding quotes
                    node.value.s += 1;
                    node.value_len -= 2;
                    break;
                case JSON_DOUBLE:
                    node.value.d = atof(node.value.s);
                    break;
                case JSON_LONGLONG:
                    node.value.l = atoll(node.value.s);
                    break;
                case JSON_ARRAY:
                case JSON_OBJECT:
                    break;
                }
                if( task->callback ) {
                    task->callback( task, &node );
                }
                if( !task->callback ) {
                    /* Containers and strings pass their payload pointer;
                     * scalars pass the address of the value union. */
                    switch( node.value_type ) {
                    case JSON_ARRAY:
                    case JSON_OBJECT:
                    case JSON_STRING:
                        append = json_append(append, node.key, node.key_len, node.value_type, node.value.p, node.value_len);
                        break;
                    default:
                        append = json_append(append, node.key, node.key_len, node.value_type, &node.value, node.value_len);
                    }
                }
            }
            if( ch == ',' ) {
                task->status = STS_OBJECT_START;
            } else if( ch == '}' ) {
                return 0;
            } else {
                task->err_msg = "expect ',' or '}'";
                return -1;
            }
            break;
        default:
            task->err_msg = "unknown status";
            return -1;
        }
    }
    /* Input ran out before the object was closed. */
    task->err_msg = "unexpect EOF";
    return -1;
}
/* * Convert the internal Fluent Bit data representation to the required * one by Elasticsearch. * * 'Sadly' this process involves to convert from Msgpack to JSON. */ static char *es_format(void *data, size_t bytes, int *out_size, struct flb_out_es_config *ctx) { int i; int ret; int n_size; int index_len; uint32_t psize; size_t off = 0; time_t atime; char *buf; char *ptr_key = NULL; char *ptr_val = NULL; char buf_key[256]; char buf_val[512]; msgpack_unpacked result; msgpack_object root; msgpack_object map; char *j_entry; char j_index[ES_BULK_HEADER]; json_t *j_map; struct es_bulk *bulk; /* Iterate the original buffer and perform adjustments */ msgpack_unpacked_init(&result); /* Perform some format validation */ ret = msgpack_unpack_next(&result, data, bytes, &off); if (!ret) { return NULL; } /* We 'should' get an array */ if (result.data.type != MSGPACK_OBJECT_ARRAY) { /* * If we got a different format, we assume the caller knows what he is * doing, we just duplicate the content in a new buffer and cleanup. 
*/ return NULL; } root = result.data; if (root.via.array.size == 0) { return NULL; } /* Create the bulk composer */ bulk = es_bulk_create(); if (!bulk) { return NULL; } /* Format the JSON header required by the ES Bulk API */ index_len = snprintf(j_index, ES_BULK_HEADER, ES_BULK_INDEX_FMT, ctx->index, ctx->type); off = 0; msgpack_unpacked_destroy(&result); msgpack_unpacked_init(&result); while (msgpack_unpack_next(&result, data, bytes, &off)) { if (result.data.type != MSGPACK_OBJECT_ARRAY) { continue; } /* Each array must have two entries: time and record */ root = result.data; if (root.via.array.size != 2) { continue; } /* Create a map entry */ j_map = json_create_object(); atime = root.via.array.ptr[0].via.u64; map = root.via.array.ptr[1]; n_size = map.via.map.size + 1; json_add_to_object(j_map, "date", json_create_number(atime)); for (i = 0; i < n_size - 1; i++) { msgpack_object *k = &map.via.map.ptr[i].key; msgpack_object *v = &map.via.map.ptr[i].val; if (k->type != MSGPACK_OBJECT_BIN && k->type != MSGPACK_OBJECT_STR) { continue; } /* Store key */ psize = k->via.bin.size; if (psize <= (sizeof(buf_key) - 1)) { memcpy(buf_key, k->via.bin.ptr, psize); buf_key[psize] = '\0'; ptr_key = buf_key; } else { /* Long JSON map keys have a performance penalty */ ptr_key = flb_malloc(psize + 1); memcpy(ptr_key, k->via.bin.ptr, psize); ptr_key[psize] = '\0'; } /* * Sanitize key name, Elastic Search 2.x don't allow dots * in field names: * * https://goo.gl/R5NMTr */ char *p = ptr_key; char *end = ptr_key + psize; while (p != end) { if (*p == '.') *p = '_'; p++; } /* Store value */ if (v->type == MSGPACK_OBJECT_NIL) { json_add_to_object(j_map, ptr_key, json_create_null()); } else if (v->type == MSGPACK_OBJECT_BOOLEAN) { json_add_to_object(j_map, ptr_key, json_create_bool(v->via.boolean)); } else if (v->type == MSGPACK_OBJECT_POSITIVE_INTEGER) { json_add_to_object(j_map, ptr_key, json_create_number(v->via.u64)); } else if (v->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) { 
json_add_to_object(j_map, ptr_key, json_create_number(v->via.i64)); } else if (v->type == MSGPACK_OBJECT_FLOAT) { json_add_to_object(j_map, ptr_key, json_create_number(v->via.f64)); } else if (v->type == MSGPACK_OBJECT_STR) { /* String value */ psize = v->via.str.size; if (psize <= (sizeof(buf_val) - 1)) { memcpy(buf_val, v->via.str.ptr, psize); buf_val[psize] = '\0'; ptr_val = buf_val; } else { ptr_val = flb_malloc(psize + 1); memcpy(ptr_val, k->via.str.ptr, psize); ptr_val[psize] = '\0'; } json_add_to_object(j_map, ptr_key, json_create_string(ptr_val)); } else if (v->type == MSGPACK_OBJECT_BIN) { /* Bin value */ psize = v->via.bin.size; if (psize <= (sizeof(buf_val) - 1)) { memcpy(buf_val, v->via.bin.ptr, psize); buf_val[psize] = '\0'; ptr_val = buf_val; } else { ptr_val = flb_malloc(psize + 1); memcpy(ptr_val, k->via.bin.ptr, psize); ptr_val[psize] = '\0'; } json_add_to_object(j_map, ptr_key, json_create_string(ptr_val)); } if (ptr_key && ptr_key != buf_key) { flb_free(ptr_key); } ptr_key = NULL; if (ptr_val && ptr_val != buf_val) { flb_free(ptr_val); } ptr_val = NULL; } /* * At this point we have our JSON message, but in order to * ingest this data into Elasticsearch we need to compose the * Bulk API request, sadly it requires to prepend a JSON entry * with details about the target 'index' and 'type' for EVERY * message. */ j_entry = json_print_unformatted(j_map); json_delete(j_map); ret = es_bulk_append(bulk, j_index, index_len, j_entry, strlen(j_entry)); flb_free(j_entry); if (ret == -1) { /* We likely ran out of memory, abort here */ msgpack_unpacked_destroy(&result); *out_size = 0; es_bulk_destroy(bulk); return NULL; } } msgpack_unpacked_destroy(&result); *out_size = bulk->len; buf = bulk->ptr; /* * Note: we don't destroy the bulk as we need to keep the allocated * buffer with the data. Instead we just release the bulk context and * return the bulk->ptr buffer */ flb_free(bulk); return buf; }