/*
 * Append one event to the history and persist it to disk.
 *
 * history    - event history container (JSON array root plus max size).
 * time       - event timestamp.
 * type       - event type string (copied into the JSON).
 * origin     - event origin string (copied into the JSON).
 * params     - array of parameter strings, paramCount entries.
 * paramCount - number of entries in params.
 */
void pushEventToHistory(EventHistory_t* history, time_t time, const char* type,
                        const char* origin, const char** params,
                        unsigned int paramCount)
{
    // If our event history is currently too large, get rid of the oldest
    // element.
    if (json_array_size(history->root) >= history->maxSize) {
        popEvent(history);
    }

    json_t* eventObj = json_object();
    json_object_set_new(eventObj, "time", json_integer(time));
    json_object_set_new(eventObj, "type", json_string(type));
    json_object_set_new(eventObj, "origin", json_string(origin));

    // Fill our json parameter array, then hand ownership to eventObj.
    json_t* jsonParamArray = json_array();
    for (unsigned int i = 0; i < paramCount; i++) {
        // append_new: the array steals the fresh string's reference,
        // fixing the per-parameter reference leak of json_array_append().
        json_array_append_new(jsonParamArray, json_string(params[i]));
    }
    json_object_set_new(eventObj, "params", jsonParamArray);

    // The history array takes ownership of eventObj (plain append leaked
    // one reference per event).
    json_array_append_new(history->root, eventObj);

    exportHistory(history, "messages.json");
}
/*
 * Serialize a snapshot of the server statistics to a JSON string.
 * All JSON values are allocated out of the APR pool `mpool`, so no explicit
 * frees are required; the returned string lives in the pool as well.
 */
char * slayer_server_stats_tojson(slayer_server_stats_t *istats,apr_pool_t *mpool) {
    /* Take a private snapshot first so the live stats can keep mutating. */
    slayer_server_stats_t stats;
    slayer_server_stats_get(istats,&stats);
    json_value *container = json_object_create(mpool);
    json_object_add(container,"total_requests",json_long_create(mpool,stats.total_requests));
    json_value *hits = json_array_create(mpool,stats.nslice);
    json_value *slices = json_array_create(mpool,stats.nslice);
    int i;
    /* The slice buffers appear to form a ring: walk from just past the
     * current offset to the end, then wrap from 0 up to the offset, so the
     * entries come out oldest-first.  The slot at `offset` itself is skipped
     * by both loops — presumably the in-progress slice; TODO confirm.
     * Slots with slices[i] == 0 are treated as unused. */
    for ( i = stats.offset+1; i < stats.nslice; i++) {
        if (stats.slices[i] != 0) {
            json_array_append(hits,json_long_create(mpool,stats.hits[i]));
            json_array_append(slices,json_long_create(mpool,stats.slices[i]));
        }
    }
    for ( i = 0; i < stats.offset; i++) {
        if (stats.slices[i] != 0) {
            json_array_append(hits,json_long_create(mpool,stats.hits[i]));
            json_array_append(slices,json_long_create(mpool,stats.slices[i]));
        }
    }
    json_object_add(container,"hits",hits);
    json_object_add(container,"slices",slices);
    json_object_add(container,"start_time",json_long_create(mpool,stats.start_time));
    /* apr_time_now() returns microseconds; convert to seconds. */
    json_object_add(container,"current_time",json_long_create(mpool,apr_time_now() / (1000*1000)));
    return json_serialize(mpool,container);
}
/*
 * Serialize an Assimp quaternion as the JSON array [w, x, y, z].
 * Returns a new reference owned by the caller.
 */
json_t* process_quaternion(const struct aiQuaternion* quat)
{
    json_t* json = json_array();
    // append_new: the array steals each fresh json_real reference, fixing
    // the four-reference leak of plain json_array_append().
    json_array_append_new(json, json_real(quat->w));
    json_array_append_new(json, json_real(quat->x));
    json_array_append_new(json, json_real(quat->y));
    json_array_append_new(json, json_real(quat->z));
    return json;
}
/*
 * Exercise json_array_remove(): removal at out-of-range indices must fail,
 * and in-range removal must shift later elements down.
 *
 * Plain json_array_append() (not _new) is used deliberately: this function
 * keeps its own references to `five` and `seven` so they can be compared
 * by pointer and decref'd at the end.
 */
static void test_remove(void)
{
    json_t *array, *five, *seven;

    array = json_array();
    five = json_integer(5);
    seven = json_integer(7);

    if(!array)
        fail("unable to create array");
    if(!five)
        fail("unable to create integer");
    if(!seven)
        fail("unable to create integer");

    /* Removing from an empty array must fail. */
    if(!json_array_remove(array, 0))
        fail("able to remove an unexisting index");

    if(json_array_append(array, five))
        fail("unable to append");

    /* Index 1 is one past the end of a one-element array. */
    if(!json_array_remove(array, 1))
        fail("able to remove an unexisting index");

    if(json_array_remove(array, 0))
        fail("unable to remove");

    if(json_array_size(array) != 0)
        fail("array size is invalid after removing");

    /* Build [5, 7, 5, 7]; removing index 2 should yield [5, 7, 7]. */
    if(json_array_append(array, five) ||
       json_array_append(array, seven) ||
       json_array_append(array, five) ||
       json_array_append(array, seven))
        fail("unable to append");

    if(json_array_remove(array, 2))
        fail("unable to remove");

    if(json_array_size(array) != 3)
        fail("array size is invalid after removing");

    if(json_array_get(array, 0) != five ||
       json_array_get(array, 1) != seven ||
       json_array_get(array, 2) != seven)
        fail("remove works incorrectly");

    json_decref(five);
    json_decref(seven);
    json_decref(array);
}
// ------------------------------------------------------------------------------------------------- static void DumpTreeJSON ( le_cfg_IteratorRef_t iterRef, ///< Read the tree data from this iterator. json_t* jsonObject ///< JSON object to hold the tree data. ) // ------------------------------------------------------------------------------------------------- { // Note that because this is a recursive function, the buffer here is static in order to save on // stack space. The implication here is that we then have to be careful how it is later // accessed. Also, this makes the function not thread safe. But this trade off was made as // this was not intended to be a multi-threaded program. static char strBuffer[LE_CFG_STR_LEN_BYTES] = ""; // Build up the child array. json_t* childArrayPtr = json_array(); do { // Simply grab the name and the type of the current node. le_cfg_GetNodeName(iterRef, "", strBuffer, sizeof(strBuffer)); le_cfg_nodeType_t type = le_cfg_GetNodeType(iterRef, ""); switch (type) { // It's a stem object, so mark this item as being a stem and recurse into the stem's // sub-items. case LE_CFG_TYPE_STEM: { json_t* nodePtr = CreateJsonNode(strBuffer, NodeTypeStr(type)); le_cfg_GoToFirstChild(iterRef); DumpTreeJSON(iterRef, nodePtr); le_cfg_GoToParent(iterRef); json_array_append(childArrayPtr, nodePtr); } break; default: { json_t* nodePtr = CreateJsonNodeFromIterator(iterRef); if (nodePtr != NULL) { json_array_append(childArrayPtr, nodePtr); } } break; } } while (le_cfg_GoToNextSibling(iterRef) == LE_OK); // Set children into the JSON document. json_object_set_new(jsonObject, JSON_FIELD_CHILDREN, childArrayPtr); }
/*
 * Handle a delete-node response from the persistence layer.
 *
 * On "ack":"ok", broadcast a newData message (listing the deleted node id)
 * to the originating web session over `sweb`.  Always removes the pending
 * request and releases `response`.
 */
void pss_response_delNode(req_t * req, json_t * response, int32_t requestId,
                          void *sweb, req_store_t * req_store)
{
    // json_string_value() returns NULL when "ack" is missing or not a
    // string; the old code passed that straight into strcmp() (UB).
    const char *ack = json_string_value(json_object_get(response, "ack"));
    if (ack != NULL && strcmp(ack, "ok") == 0) {
        json_t *web_resp = json_object();
        json_object_set_new(web_resp, "type", json_string("newData"));

        //TODO at the moment only the original node gets the update, which is good enough for me
        json_t *sessionIds = json_array();
        // Plain append (increfs) is correct here: the value is borrowed
        // from req->request, which keeps its own reference.
        json_array_append(sessionIds,
                          json_object_get(req->request, "sessionId"));
        json_object_set_new(web_resp, "sessionIds", sessionIds);

        json_t *newData = json_object();
        json_t *deletedNodes = json_array();
        json_array_append(deletedNodes,
                          json_object_get(
                              json_object_get(
                                  json_object_get(req->request,
                                                  "clientRequest"),
                                  "request"),
                              "id"));
        json_object_set_new(newData, "deletedNodes", deletedNodes);
        json_object_set_new(web_resp, "newData", newData);

        zmsg_t *res = zmsg_new();
        char *web_res_str = json_dumps(web_resp, JSON_COMPACT);
        printf("\nbroker:sweb sent: %s\n", web_res_str);
        zmsg_addstr(res, web_res_str);
        free(web_res_str);
        zmsg_wrap(res, req->address);
        zmsg_send(&res, sweb);
        json_decref(web_resp);
    } else if (ack != NULL && strcmp(ack, "fail") == 0) {
        //TODO ??
    }

    request_store_delete(req_store, requestId);
    json_decref(response);
}
void AssemblyWriter::writeToString(utString& out) { json_t *json = json_object(); json_object_set(json, "type", json_string("ASSEMBLY")); json_object_set(json, "name", json_string(name.c_str())); json_object_set(json, "version", json_string(version.c_str())); json_object_set(json, "uid", json_string(uid.c_str())); json_object_set(json, "loomconfig", json_string(loomConfig.c_str())); #ifdef LOOM_ENABLE_JIT json_object_set(json, "jit", json_true()); #else json_object_set(json, "jit", json_false()); #endif json_object_set(json, "debugbuild", LSCompiler::isDebugBuild() ? json_true() : json_false()); // references json_t *refArray = json_array(); json_object_set(json, "references", refArray); for (UTsize i = 0; i < references.size(); i++) { utString assemblyName = references.at(i); json_t *ro = json_object(); json_object_set(ro, "name", json_string(assemblyName.c_str())); json_array_append(refArray, ro); } // modules json_t *moduleArray = json_array(); json_object_set(json, "modules", moduleArray); for (UTsize i = 0; i < modules.size(); i++) { json_t *mjson = modules[i]->write(); json_array_append(moduleArray, mjson); } out = json_dumps(json, JSON_INDENT(3) | JSON_SORT_KEYS | JSON_PRESERVE_ORDER | JSON_COMPACT); }
/*
 * Send the node's current value to the subscriber identified by `sid`.
 *
 * Builds {"responses":[{"rid":0,"updates":[[sid, value, timestamp]]}]} and
 * writes it to the link's websocket.  On any allocation failure the
 * partially built tree is released via json_delete(top): every child is
 * attached to `top` with a *_new call before the next allocation, so
 * deleting the root frees everything created so far.
 */
void dslink_response_send_val(DSLink *link, DSNode *node, uint32_t sid)
{
    /* No timestamp means the node has never been assigned a value. */
    if (!node->value_timestamp) {
        return;
    }
    json_t *top = json_object();
    if (!top) {
        return;
    }
    json_t *resps = json_array();
    if (!resps) {
        json_delete(top);
        return;
    }
    json_object_set_new_nocheck(top, "responses", resps);
    json_t *resp = json_object();
    if (!resp) {
        json_delete(top);
        return;
    }
    json_array_append_new(resps, resp);
    json_object_set_new_nocheck(resp, "rid", json_integer(0));
    json_t *updates = json_array();
    if (!updates) {
        json_delete(top);
        return;
    }
    json_object_set_new_nocheck(resp, "updates", updates);
    json_t *update = json_array();
    if (!update) {
        json_delete(top);
        return;
    }
    json_array_append_new(updates, update);
    json_array_append_new(update, json_integer(sid));
    /* Plain append (not _new) on purpose: the node keeps ownership of its
     * value and timestamp; the message only borrows extra references. */
    json_array_append(update, node->value);
    json_array_append(update, node->value_timestamp);
    dslink_ws_send_obj(link->_ws, top);
    json_delete(top);
}
static json_t* net_write_json_presets(void) { json_t* pres = json_object(); json_t* ins; json_t* outs; json_t* l; json_t* m; json_t* o; json_t* p; int i, j; json_object_set(pres, "count", json_integer(NET_PRESETS_MAX)); m = json_array(); for(i=0; i<NET_PRESETS_MAX; i++) { p = json_object(); json_object_set(p, "name", json_string( preset_name(i)) ); l = json_array(); for(j=0; j<PRESET_INODES_COUNT; j++) { /// o = json_object(); json_object_set(o, "enabled", json_integer( presets[i].ins[j].enabled )); /// FIXME: shouldn't need idx here // json_object_set(o, "idx", json_integer( presets[i].ins[j].idx )); /// store for readibility anyhow json_object_set(o,"idx", json_integer( j )); json_object_set(o, "value", json_integer( presets[i].ins[j].value )); json_array_append(l, o); } json_object_set(p, "ins", l); l = json_array(); for(j=0; j<NET_OUTS_MAX; j++) { o = json_object(); /// FIXME: shouldn't need idx here // json_object_set(o, "idx", json_integer( presets[i].outs[j].outIdx )); /// store for readibility anyhow json_object_set(o,"idx", json_integer( j )); json_object_set(o, "target", json_integer( presets[i].outs[j].target )); json_object_set(o, "enabled", json_integer( presets[i].outs[j].enabled )); json_array_append(l, o); } json_object_set(pres, "outs", l); json_array_append(m, p); } json_object_set(pres, "data", m); return pres; }
static void test_ensure_ascii() { int i; int num_tests = sizeof(test_ensure_ascii_data) / sizeof(const char *) / 2; for(i = 0; i < num_tests; i++) { json_t *array, *string; const char *input, *output; char *result, *stripped; input = test_ensure_ascii_data[i][0]; output = test_ensure_ascii_data[i][1]; array = json_array(); string = json_string(input); if(!array || !string) fail("unable to create json values"); json_array_append(array, string); result = json_dumps(array, JSON_ENSURE_ASCII); /* strip leading [" and trailing "] */ stripped = &result[2]; stripped[strlen(stripped) - 2] = '\0'; if(strcmp(stripped, output) != 0) { free(result); fail("the result of json_dumps is invalid"); } free(result); } }
/*
 * Write the revenue movement list and total to <save_path>/revenue.json.
 *
 * save_path - directory path; no longer modified.  The old code used
 *             strcat(save_path, ...), appending to the caller's buffer in
 *             place — an overflow risk and a surprise side effect.
 */
void saveRevenueMovements(char * save_path)
{
    json_t * json_revenue = json_object();
    json_t * array = json_array();
    json_t * json_total = json_real(total);

    for (struct revenue * revenue = firstRevenueMovement; revenue != 0;
         revenue = revenue->next) {
        // append_new: the array steals the fresh reference (plain append
        // leaked one per movement).  Assumes createRevenueArray() returns a
        // new reference — TODO confirm.
        json_array_append_new(array, createRevenueArray(revenue));
    }

    // set_new: json_revenue takes ownership of json_total and array,
    // fixing the reference leaks of plain json_object_set.
    int i = json_object_set_new(json_revenue, "total", json_total);
    int j = json_object_set_new(json_revenue, "revenue", array);
    if (i == -1 || j == -1) {
        printf("errore non è possibile scrivere il file");
    }

    // Build the output path in a local buffer instead of mutating save_path.
    char out_path[4096];
    snprintf(out_path, sizeof out_path, "%s/revenue.json", save_path);
    json_dump_file(json_revenue, out_path, JSON_INDENT(3));

    // Release the root (was leaked on every call).
    json_decref(json_revenue);
}
void init_json() { arr = json_array(); MYSQL_ROW row1, row2; MYSQL_RES *res1, *res2; int i, j = 0; char *fields[100]; mysql_query(conn,"CREATE TABLE temp LIKE details"); mysql_query(conn,"INSERT INTO temp (SELECT * FROM details)"); mysql_query(conn,"ALTER TABLE temp DROP COLUMN num"); mysql_query(conn, "SHOW FIELDS FROM temp"); res1 = mysql_store_result(conn); while ((row1 = mysql_fetch_row(res1)) != NULL) { fields[j] = row1[0]; j++; } mysql_query(conn,"SELECT * FROM temp"); res2 = mysql_store_result(conn); while ((row2 = mysql_fetch_row(res2)) != NULL) { json_t *obj = json_object(); for (i = 0 ; i < mysql_num_fields(res2) ; i++) { if (row2[i] == NULL) { row2[i] = " ";s } json_object_set(obj, fields[i], json_string(row2[i])); } json_array_append(arr, obj); }
static json_t* net_write_json_params(void) { json_t *params = json_object(); json_t *l = json_array(); json_t* o; int i; json_object_set(params, "count", json_integer(net->numParams)); for(i=0; i<net->numParams; i++) { o = json_object(); json_object_set(o, "idx", json_integer(i)); json_object_set(o, "label", json_string(net->params[i].desc.label)); json_object_set(o, "type", json_integer(net->params[i].desc.type)); json_object_set(o, "min", json_integer(net->params[i].desc.min)); json_object_set(o, "max", json_integer(net->params[i].desc.max)); json_object_set(o, "value", json_integer(net->params[i].data.value)); /// FIXME: this dumb indexing. play flag not stored correctly... json_object_set(o, "play", json_boolean(net_get_in_play(i + net->numIns))); json_array_append(l, o); } json_object_set(params, "data", l); return params; }
void node_received(char *line) { fprintf(stderr, "app: Received: '%s'\n", line); json_error_t error; json_t *incoming = json_loads(line, 0, &error); const char *command = json_string_value(json_array_get(incoming, 0)); assert(command); json_t *arg = json_array_get(incoming, 1); msg_func_t handler = find_msg_handler(command); if (handler == NULL) { fprintf(stderr, "app: Unknown command received: '%s'", command); exit(1); } else { json_t *response = handler(arg); if (json_array_size(incoming) > 2) { json_t *response_command = json_array_get(incoming, 2); json_t *array = json_array(); json_array_append(array, response_command); if (response) json_array_append_new(array, response); else json_array_append_new(array, json_null()); node_send_json(array); } else { if (response) json_decref(response); } } json_decref(incoming); free(line); }
irods::error get_query_array( rsComm_t* _comm, json_t*& _queries ) { if( !_comm ) { return ERROR( SYS_INVALID_INPUT_PARAM, "comm is null" ); } _queries = json_array(); if ( !_queries ) { return ERROR( SYS_MALLOC_ERR, "allocation of json object failed" ); } specificQueryInp_t spec_inp; memset( &spec_inp, 0, sizeof( specificQueryInp_t ) ); spec_inp.maxRows = MAX_SQL_ROWS; spec_inp.continueInx = 0; spec_inp.sql = "ls"; genQueryOut_t* gen_out = 0; int status = rsSpecificQuery( _comm, &spec_inp, &gen_out ); if( status < 0 ) { return ERROR( status, "rsSpecificQuery for 'ls' failed" ); } // first attribute is the alias of the specific query int len = gen_out->sqlResult[ 0 ].len; char* values = gen_out->sqlResult[ 0 ].value; for( int i = 0 ; i < gen_out->rowCnt ; ++i ) { char* alias = &values[ len * i ]; if( !alias ) { rodsLog( LOG_ERROR, "get_query_array - alias at %d is null", i ); continue; } json_array_append( _queries, json_string( alias ) ); } // for i freeGenQueryOut( &gen_out ); return SUCCESS(); } // get_query_array
/**
 * Append a value to this JSON array.  The array takes an extra reference;
 * `value` keeps ownership of its own.
 *
 * @throws std::domain_error if this node is not an array.
 */
void Json::append(const Json& value)
{
    if (!isArray()) {
        // BUGFIX: the message said "appendNew" but this method is append().
        throw std::domain_error("cannot use append with this json type");
    }
    json_array_append(m_json, value.m_json);
}
/*
 * Return {"slave": [ifname, ...]} — the interfaces enslaved to the given
 * bridge, read from /sys/class/net/<bridge>/brif/.
 * The caller owns the returned reference.  Exits on opendir failure.
 */
json_t *get_bridge_name(char *bridge_name)
{
    DIR *dir;
    struct dirent *dp;
    char path[512];

    // Bounded formatting replaces the unchecked strcpy/strcat chain.
    snprintf(path, sizeof path, "/sys/class/net/%s/brif/", bridge_name);

    if ((dir = opendir(path)) == NULL) {
        perror("opendir");
        exit(-1);
    }

    json_t *bridge_json = json_object();
    json_t *bridges_json = json_array();

    for (dp = readdir(dir); dp != NULL; dp = readdir(dir)) {
        // Skip "." and ".." by name.  The old counter-based skip assumed
        // they are always the first two entries, which readdir() does not
        // guarantee — it could drop real interfaces or include the dots.
        if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) {
            continue;
        }
        // append_new: the array steals the fresh string's reference,
        // fixing the per-entry leak of plain json_array_append().
        json_array_append_new(bridges_json, json_string(dp->d_name));
    }
    closedir(dir);

    json_object_set_new(bridge_json, "slave", bridges_json);
    return bridge_json;
}
json_t * ins_serialize (struct _ins * ins) { json_t * json = json_object(); json_t * bytes = json_array(); int i; for (i = 0; i < ins->size; i++) { json_array_append(bytes, json_integer(ins->bytes[i])); } json_object_set(json, "ot", json_integer(SERIALIZE_INSTRUCTION)); json_object_set(json, "address", json_uint64_t(ins->address)); json_object_set(json, "target", json_uint64_t(ins->target)); json_object_set(json, "bytes", bytes); if (ins->description == NULL) json_object_set(json, "description", json_string("")); else json_object_set(json, "description", json_string(ins->description)); if (ins->comment == NULL) json_object_set(json, "comment", json_string("")); else json_object_set(json, "comment", json_string(ins->comment)); json_object_set(json, "flags", json_integer(ins->flags)); json_object_set(json, "references", object_serialize(ins->references)); return json; }
/** Callback for LBFGS optimization to show progress
 * @param[in] instance The user data passed to the LBFGS optimizer
 * @param[in] x The current variable assignments
 * @param[in] g The current gradients
 * @param[in] fx The current negative log-likelihood value
 * @param[in] xnorm The euclidean norm of the variables
 * @param[in] gnorm The euclidean norm of the gradients
 * @param[in] step The step size for the current iteration
 * @param[in] n The number of variables
 * @param[in] k The number of the current iteration
 * @param[in] ls The number of evaluations called for the current iteration
 */
static int progress(
    void *instance,
    const conjugrad_float_t *x,
    const conjugrad_float_t *g,
    const conjugrad_float_t fx,
    const conjugrad_float_t xnorm,
    const conjugrad_float_t gnorm,
    const conjugrad_float_t step,
    int n,
    int k,
    int ls
) {
    //printf("iter\teval\tf(x) \t║x║ \t║g║ \tstep\n");
    printf("%-4d\t%-4d\t%-8g\t%-8g\t%-8.8g\t%-3.3g\n", k, ls, fx, xnorm, gnorm, step);

#ifdef JANSSON
    userdata *ud = (userdata *)instance;
    json_t *meta_steps = (json_t *)ud->meta_steps;

    // Record this iteration in the metadata trail.  set_new/append_new:
    // containers steal the fresh references, fixing the per-iteration
    // reference leaks of the plain set/append calls.
    json_t *ms = json_object();
    json_object_set_new(ms, "iteration", json_integer(k));
    json_object_set_new(ms, "eval", json_integer(ls));
    json_object_set_new(ms, "fx", json_real(fx));
    json_object_set_new(ms, "xnorm", json_real(xnorm));
    json_object_set_new(ms, "gnorm", json_real(gnorm));
    json_object_set_new(ms, "step", json_real(step));
    json_array_append_new(meta_steps, ms);
#endif

    return 0;
}
/**
 * Set the array element at `index`, padding any gap with JSON nulls, and
 * return the stored element.
 *
 * NOTE(review): `index` must be >= 0.  In the size comparison a negative
 * int is converted to a huge size_t, which previously made the padding
 * loop run (effectively) forever; now it is rejected up front.
 */
Json Json::setArrayMember( const int index, const Json member )
{
    if ( index < 0 )
        return json_array_get( data, 0 ); // out-of-range get yields a null/invalid Json

    // append_new: the array takes the fresh null directly.  (For jansson's
    // null singleton refcounting is a no-op either way, but _new is the
    // conventional form for values created inline.)
    while ( (size_t)index >= json_array_size( data ) )
        json_array_append_new( data, json_null() );

    json_array_set( data, index, member.data );
    return json_array_get( data, index );
}
// sets a json value for the next index int json_array_add_object(json_t *array, json_t *value, json_context_t *json_context) { int exit_code = 0; check_not_null(array); check_not_null(json_context); if (value != NULL) { check_result(json_array_append_new(array, value), 0); } else { check_result(json_array_append(array, json_null()), 0); } goto cleanup; error: exit_code = -1; cleanup: return exit_code; }
// sets a json value for the next index int json_array_add_int(json_t *array, int *value, json_context_t *json_context) { int exit_code = 0; json_t *json_integer_value = NULL; check_not_null(array); check_not_null(json_context); if (value != NULL) { json_integer_value = json_integer(*value); check_not_null(json_integer_value); check_result(json_array_append_new(array, json_integer_value), 0); json_integer_value = NULL; } else { check_result(json_array_append(array, json_null()), 0); } goto cleanup; error: exit_code = -1; cleanup: if (json_integer_value != NULL) { json_free(json_integer_value); } return exit_code; }
//native json_array_append_new(Handle:hArray, Handle:hValue); static cell_t Native_json_array_append_new(IPluginContext *pContext, const cell_t *params) { HandleError err; HandleSecurity sec; sec.pOwner = NULL; sec.pIdentity = myself->GetIdentity(); // Param 1: hArray json_t *object; Handle_t hndlObject = static_cast<Handle_t>(params[1]); if ((err=g_pHandleSys->ReadHandle(hndlObject, htJanssonObject, &sec, (void **)&object)) != HandleError_None) { return pContext->ThrowNativeError("Invalid <Array> handle %x (error %d)", hndlObject, err); } // Param 2: hValue json_t *value; Handle_t hndlValue = static_cast<Handle_t>(params[2]); if ((err=g_pHandleSys->ReadHandle(hndlValue, htJanssonObject, &sec, (void **)&value)) != HandleError_None) { return pContext->ThrowNativeError("Invalid JSON handle %x (error %d)", hndlObject, err); } bool bSuccess = (json_array_append(object, value) == 0); if(bSuccess) { if ((err=g_pHandleSys->FreeHandle(hndlValue, NULL)) != HandleError_None) { pContext->ThrowNativeError("Could not free <Object> handle %x (error %d)", hndlValue, err); return false; } } return bSuccess; }
void event(void) { json_t* object; json_t* array_text_list; json_t* array_events; object = json_object(); array_events = json_array(); json_object_set(object, "events", array_events); json_decref(array_events); for (int f = 0; f < 58; f++) { const TableInfo* event_info; event_info = &table_info_event[f]; std::string filename = "Extracted/" + std::string(event_info->filename); Event event(filename.c_str(), event_info->name, event_info->offset, event_info->length); json_t* object_event; json_t* json_value; object_event = json_object(); json_array_append(array_events, object_event); json_value = json_string(event_info->name); json_object_set(object_event, "name", json_value); json_decref(json_value); json_value = json_string(event_info->filename); json_object_set(object_event, "filename", json_value); json_decref(json_value); json_value = json_integer(event_info->offset); json_object_set(object_event, "offset", json_value); json_decref(json_value); json_value = json_integer(event_info->length); json_object_set(object_event, "length", json_value); json_decref(json_value); array_text_list = json_array(); json_object_set(object_event, "text_list", array_text_list); json_decref(array_text_list); for (std::vector<Text*>::iterator tt = event.text_data()->begin(); tt != event.text_data()->end(); tt++) { Text2JSON t2json(*(*tt), array_text_list); t2json.output(); } } json_dump_file(object, "event.json", JSON_INDENT(0) | JSON_PRESERVE_ORDER); json_decref(object); }
/*
 * Check the program's solution against the expected output file and record
 * the verdict in the module-level _solution_correctQ JSON object.
 * Returns the comparison result.
 */
wbBool wbSolution(wbArg_t arg, void * data, int rows, int columns) {
    int ii;
    char * type;
    wbBool res;
    json_t * msg;
    char * expectedOutputFile;
    char * outputFile;
    json_t * inputFileArray;

    expectedOutputFile = wbArg_getExpectedOutputFile(arg);
    outputFile = wbArg_getOutputFile(arg);
    type = wbArg_getType(arg);

    wbAssert(type != NULL);
    wbAssert(expectedOutputFile != NULL);
    wbAssert(outputFile != NULL);

    /* Delegate to the (files, type, data, dims) overload for the actual check. */
    res = wbSolution(expectedOutputFile, outputFile, type, data, rows, columns);

#if 1
    if (res) {
        _solution_correctQ = json_object();
        msg = json_string("Solution is correct.");
        json_object_set(_solution_correctQ, "CorrectQ", json_true());
        json_object_set(_solution_correctQ, "Message", msg);
    } else {
        /* NOTE(review): msg is assigned the PREVIOUS _solution_correctQ
         * object here, not a failure string — this looks like a copy/paste
         * slip (and dereferences whatever the global held, possibly NULL).
         * Confirm the intended failure message. */
        msg = _solution_correctQ;
        _solution_correctQ = json_object();
        json_object_set(_solution_correctQ, "CorrectQ", json_false());
        json_object_set(_solution_correctQ, "Message", msg);
    }
#else
    if (res) {
        _solution_correctQ = json_true();
    } else {
        _solution_correctQ = json_false();
    }
#endif

    /* NOTE(review): inputFileArray is built but never attached to anything —
     * the json_object_set calls below are commented out — so this loop is
     * dead work and the array is a leaked reference. */
    inputFileArray = json_array();
    for (ii = 0; ii < wbArg_getInputCount(arg); ii++) {
        char * file = wbArg_getInputFile(arg, ii);
        if (file != NULL) {
            json_array_append(inputFileArray, json_string(file));
        }
    }

    /*
       json_object_set(_solution_correctQ, "InputFiles", inputFileArray);
       json_object_set(_solution_correctQ, "ExpectedOutput", json_string(expectedOutputFile));
       json_object_set(_solution_correctQ, "OutputFile", json_string(outputFile));
     */

    return res;
}
/*
 * Verify that direct self-references are rejected by append/insert/set,
 * that indirect circular references can be built, and that json_dumps
 * detects the cycle at serialization time.
 */
static void test_circular()
{
    json_t *array1, *array2;

    /* the simple cases are checked */

    array1 = json_array();
    if(!array1)
        fail("unable to create array");

    /* An array must refuse to contain itself directly. */
    if(json_array_append(array1, array1) == 0)
        fail("able to append self");

    if(json_array_insert(array1, 0, array1) == 0)
        fail("able to insert self");

    if(json_array_append_new(array1, json_true()))
        fail("failed to append true");

    if(json_array_set(array1, 0, array1) == 0)
        fail("able to set self");

    json_decref(array1);

    /* create circular references */

    array1 = json_array();
    array2 = json_array();
    if(!array1 || !array2)
        fail("unable to create array");

    /* Indirect cycles (a->b->a) cannot be detected at append time. */
    if(json_array_append(array1, array2) ||
       json_array_append(array2, array1))
        fail("unable to append");

    /* circularity is detected when dumping */
    if(json_dumps(array1, 0) != NULL)
        fail("able to dump circulars");

    /* decref twice to deal with the circular references */
    json_decref(array1);
    json_decref(array2);
    json_decref(array1);
}
static json_t* net_write_json_ops(void) { json_t* ops = json_object(); json_t* o; json_t* l = json_array(); op_t* op; int i, j; // binary blob for operator state // a large but arbitrary maximum size! u8 bin[0x10000]; int binCount; json_t* state; u8* dst; json_object_set(ops, "count", json_integer(net->numOps)); for(i=0; i<net->numOps; i++) { o = json_object(); op = net->ops[i]; json_object_set(o, "idx", json_integer(i)); json_object_set(o, "type", json_integer(op->type)); json_object_set(o, "name", json_string(op->opString)); json_object_set(o, "numIns", json_integer(op->numInputs)); json_object_set(o, "numOuts", json_integer(op->numOutputs)); /// ok, operator state data is going to be weird. /// we could write a proper json parser for each operator type (insane.) /// but for now i am just going to use the operator pickling/unpickling functions, /// and stick an ugly byte-array in the json. sorry! if(op->pickle == NULL) { // no state binCount = 0; } else { dst = bin; dst = (*(op->pickle))(op, dst); binCount = (u32)dst - (u32)(bin); } state = json_array(); for(j=0; j<binCount; j++) { json_array_append(state, json_integer(bin[j])); } json_object_set(o, "state", state); json_array_append(l, o); } json_object_set(ops, "data", l); return ops; }
/*
 * Verify json_array_extend(): extending [5 x10] with [7 x10] must yield a
 * 20-element array with the fives first, then the sevens, leaving the
 * source array untouched.  Plain append (incref) is used so this function
 * keeps its own references to `five` and `seven` for pointer comparison.
 */
static void test_extend(void)
{
    json_t *array1, *array2, *five, *seven;
    int i;

    array1 = json_array();
    array2 = json_array();
    five = json_integer(5);
    seven = json_integer(7);

    if(!array1 || !array2)
        fail("unable to create array");
    if(!five || !seven)
        fail("unable to create integer");

    for(i = 0; i < 10; i++) {
        if(json_array_append(array1, five))
            fail("unable to append");
    }
    for(i = 0; i < 10; i++) {
        if(json_array_append(array2, seven))
            fail("unable to append");
    }

    if(json_array_size(array1) != 10 || json_array_size(array2) != 10)
        fail("array size is invalid after appending");

    if(json_array_extend(array1, array2))
        fail("unable to extend");

    /* First ten elements are the original fives... */
    for(i = 0; i < 10; i++) {
        if(json_array_get(array1, i) != five)
            fail("invalid array contents after extending");
    }
    /* ...followed by the ten sevens copied from array2. */
    for(i = 10; i < 20; i++) {
        if(json_array_get(array1, i) != seven)
            fail("invalid array contents after extending");
    }

    json_decref(five);
    json_decref(seven);
    json_decref(array1);
    json_decref(array2);
}
/*
 * Handle a search response: collect the id/ancestorId of every hit in
 * response->nodeArray and send a retrieveRequest for their content to the
 * graph service over `sgraph`.
 *
 * Ownership: `response` is stashed in req->response (not decref'd here);
 * the borrowed id values are incref'd by plain json_array_append; the
 * request tree is released via json_decref(graph_request) after sending.
 */
void pss_response_searchResponse(req_t * req, json_t * response, int32_t requestId, void *sgraph)
{
    //store the locations and request the content
    req->response = response;
    json_t *nodeArray = json_object_get(response, "nodeArray");

    json_t *idArray = json_array();
    json_t *ancestorIdArray = json_array();
    int i;
    for (i = 0; i < json_array_size(nodeArray); i++) {
        /* Plain append (incref) is correct: the values are borrowed from
         * the response, which keeps its own references. */
        json_array_append(idArray,
                          json_object_get(json_array_get(nodeArray, i),
                                          "id"));
        json_array_append(ancestorIdArray,
                          json_object_get(json_array_get(nodeArray, i),
                                          "ancestorId"));
    }

    json_t *graph_request = json_object();
    json_object_set_new(graph_request, "requestId", json_integer(requestId));

    json_t *retrieveRequest = json_object();
    json_object_set_new(retrieveRequest, "type", json_string("retrieveRequest"));
    /* set_new hands ownership of both arrays to retrieveRequest. */
    json_object_set_new(retrieveRequest, "idArray", idArray);
    json_object_set_new(retrieveRequest, "ancestorIdArray", ancestorIdArray);
    json_object_set_new(graph_request, "request", retrieveRequest);

    zmsg_t *mreq = zmsg_new();
    char *graph_req_str = json_dumps(graph_request, JSON_COMPACT);
    printf("\nbroker:sgraph sent: %s\n", graph_req_str);
    zmsg_addstr(mreq, graph_req_str);
    free(graph_req_str);
    zmsg_send(&mreq, sgraph);
    json_decref(graph_request);
}
/*
 * Pack a jt_msg_stats into a JSON string (*out, malloc'd by json_dumps;
 * caller frees).  Always returns 0.
 *
 * Refcount choreography: each sample object is created (ref 1) and
 * appended with plain json_array_append (ref 2); samples_arr is set into
 * params with plain json_object_set (params holds an extra ref), and
 * params into t likewise.  After dumping, the explicit decref loop plus
 * json_array_clear / json_object_clear / json_decref calls unwind all of
 * those references.  Deliberately ordered; do not reshuffle.
 */
int jt_stats_packer(void *data, char **out)
{
    struct timespec mts; /* message timestamp */
    struct jt_msg_stats *stats_msg = data;

    json_t *t = json_object();
    json_t *samples_arr = json_array();
    json_t *params = json_object();
    json_t *jmts = json_object();

    json_object_set_new(params, "iface", json_string(stats_msg->iface));
    json_object_set_new(params, "whoosh_err_mean",
                        json_integer(stats_msg->err.mean));
    json_object_set_new(params, "whoosh_err_max",
                        json_integer(stats_msg->err.max));
    json_object_set_new(params, "whoosh_err_sd",
                        json_integer(stats_msg->err.sd));

    /* VLA of per-sample objects so they can be decref'd after the dump. */
    json_t *sample[stats_msg->sample_count];

    // order matters!
    for (int i = 0; i < stats_msg->sample_count; i++) {
        sample[i] = json_object();
        json_object_set_new(sample[i], "rx",
                            json_integer(stats_msg->samples[i].rx));
        json_object_set_new(sample[i], "tx",
                            json_integer(stats_msg->samples[i].tx));
        json_object_set_new(sample[i], "rxP",
                            json_integer(stats_msg->samples[i].rxPkt));
        json_object_set_new(sample[i], "txP",
                            json_integer(stats_msg->samples[i].txPkt));
        json_array_append(samples_arr, sample[i]);
    }

    json_object_set_new(
        t, "msg", json_string(jt_messages[JT_MSG_STATS_V1].key));
    json_object_set(params, "s", samples_arr);
    json_object_set(t, "p", params);

    /* timestamp the new message */
    clock_gettime(CLOCK_MONOTONIC, &mts);
    json_object_set_new(jmts, "tv_sec", json_integer(mts.tv_sec));
    json_object_set_new(jmts, "tv_nsec", json_integer(mts.tv_nsec));
    json_object_set_new(params, "t", jmts);

    *out = json_dumps(t, 0);

    /* Drop our direct reference to each sample; the clear below drops the
     * array's reference, freeing them. */
    for (int i = 0; i < stats_msg->sample_count; i++) {
        json_decref(sample[i]);
    }
    json_array_clear(samples_arr);
    json_decref(samples_arr);
    json_object_clear(params);
    json_decref(params);
    json_object_clear(t);
    json_decref(t);
    return 0;
}