void OUTPUT_FORMATTER::json_finalize_result(bool result) { POOL_MEM string; json_t *msg_obj = json_object(); json_t *error_obj; /* * We mimic json-rpc result and error messages, * To make it easier to implement real json-rpc later on. */ json_object_set(msg_obj, "jsonrpc", json_string("2.0")); json_object_set(msg_obj, "id", json_null()); if (result) { json_object_set(msg_obj, "result", result_array_json); } else { error_obj = json_object(); json_object_set_new(error_obj, "code", json_integer(1)); json_object_set_new(error_obj, "message", json_string("failed")); json_object_set(error_obj, "data", result_array_json); json_object_set_new(msg_obj, "error", error_obj); } string.bsprintf("%s\n", json_dumps(msg_obj, UA_JSON_FLAGS)); send_func(send_ctx, string.c_str()); json_array_clear(result_array_json); json_object_clear(msg_obj); }
int jt_stats_packer(void *data, char **out) { struct timespec mts; /* message timestamp */ struct jt_msg_stats *stats_msg = data; json_t *t = json_object(); json_t *samples_arr = json_array(); json_t *params = json_object(); json_t *jmts = json_object(); json_object_set_new(params, "iface", json_string(stats_msg->iface)); json_object_set_new(params, "whoosh_err_mean", json_integer(stats_msg->err.mean)); json_object_set_new(params, "whoosh_err_max", json_integer(stats_msg->err.max)); json_object_set_new(params, "whoosh_err_sd", json_integer(stats_msg->err.sd)); json_t *sample[stats_msg->sample_count]; // order matters! for (int i = 0; i < stats_msg->sample_count; i++) { sample[i] = json_object(); json_object_set_new(sample[i], "rx", json_integer(stats_msg->samples[i].rx)); json_object_set_new(sample[i], "tx", json_integer(stats_msg->samples[i].tx)); json_object_set_new(sample[i], "rxP", json_integer(stats_msg->samples[i].rxPkt)); json_object_set_new(sample[i], "txP", json_integer(stats_msg->samples[i].txPkt)); json_array_append(samples_arr, sample[i]); } json_object_set_new( t, "msg", json_string(jt_messages[JT_MSG_STATS_V1].key)); json_object_set(params, "s", samples_arr); json_object_set(t, "p", params); /* timestamp the new message */ clock_gettime(CLOCK_MONOTONIC, &mts); json_object_set_new(jmts, "tv_sec", json_integer(mts.tv_sec)); json_object_set_new(jmts, "tv_nsec", json_integer(mts.tv_nsec)); json_object_set_new(params, "t", jmts); *out = json_dumps(t, 0); for (int i = 0; i < stats_msg->sample_count; i++) { json_decref(sample[i]); } json_array_clear(samples_arr); json_decref(samples_arr); json_object_clear(params); json_decref(params); json_object_clear(t); json_decref(t); return 0; }
bool DestroyHandlerUtil (void) { bool success_flag = true; if (s_mapped_filenames_p) { size_t handler_index; json_t *handler_p; json_array_foreach (s_mapped_filenames_p, handler_index, handler_p) { const char *user_key_s; json_t *user_p; json_object_foreach (handler_p, user_key_s, user_p) { const char *obj_key_s; json_t *obj_p; json_object_foreach (user_p, obj_key_s, obj_p) { json_t *filename_p = json_object_get (obj_p, S_FILENAME_KEY_S); if (filename_p) { if (json_is_string (filename_p)) { const char *filename_s = json_string_value (filename_p); if (!RemoveFile (filename_s)) { } } } /* if (filename_p) */ } /* json_object_foreach (user_p, obj_index, obj_p) */ } /* json_object_foreach (handler_p, user_index, user_p) */ } /* json_array_foreach (s_mapped_filenames_p, handler_index, handler_p) */ if (json_array_clear (s_mapped_filenames_p) == 0) { json_decref (s_mapped_filenames_p); } else { success_flag = false; } }
struct samba3_user *getsamba3users(void) { struct samba3_user *list, *current; int count, entry_count; json_t *json; json_error_t error; const char *key; json_t *iterator, *entry, *value; char username[64],password[64]; int type; // first create dummy entry list = getsamba3user("", "", 0); current = list; //json = json_loads( "[{\"user\":\"peter\",\"pass\":\"test\"},{\"user\":\"chris\",\"pass\":\"test\"}]", &error ); json = json_loads( nvram_default_get( "samba3_users", "[]"), 0, &error); if( !json ) { fprintf( stderr, "[JASON] ERROR\n"); } else { entry_count = json_array_size(json); for( count = 0; count < entry_count; count++ ) { entry = json_array_get( json, count ); iterator = json_object_iter(entry); // reset username[0] = 0; password[0] = 0; while(iterator) { key = json_object_iter_key(iterator); value = json_object_iter_value(iterator); /* use key and value ... */ if( !strcmp( key, "user" ) ) { strncpy( username, json_string_value( value ),sizeof(username)-1); } else if( !strcmp( key, "pass" ) ) { strncpy( password, json_string_value( value ),sizeof(password)-1); } else if( !strcmp( key, "type" ) ) { type = json_integer_value( value ); } iterator = json_object_iter_next(entry, iterator); } if( username[0] != 0 ) { current->next = getsamba3user(username, password, type); current = current->next; } } json_array_clear(json); } return list; }
struct dlna_share *getdlnashares(void) { struct dlna_share *list, *current; int count, entry_count; json_t *json; json_error_t error; const char *key; json_t *iterator, *entry, *value; char mp[64], types; // first create dummy entry list = getdlnashare("", 0); current = list; // json = json_loads( "[{\"mp\":\"/jffs\",\"label\":\"testshare\",\"perms\":\"rw\",\"public\":0},{\"mp\":\"/mnt\",\"label\":\"othertest\",\"perms\":\"ro\",\"public\":1},{\"label\":\"blah\"}]", &error ); json = json_loads(nvram_default_get("dlna_shares", "[]"), 0, &error); if (!json) { fprintf(stderr, "[JASON] ERROR\n"); } else { entry_count = json_array_size(json); for (count = 0; count < entry_count; count++) { entry = json_array_get(json, count); iterator = json_object_iter(entry); // reset mp[0] = 0; types = 0; while (iterator) { key = json_object_iter_key(iterator); value = json_object_iter_value(iterator); /* use key and value ... */ if (!strcmp(key, "mp")) { strncpy(mp, json_string_value(value), sizeof(mp) - 1); } else if (!strcmp(key, "types")) { types = json_integer_value(value); } iterator = json_object_iter_next(entry, iterator); } if (mp[0] != 0) { current->next = getdlnashare(mp, types); current = current->next; } } json_array_clear(json); } return list; }
//native json_array_clear(Handle:hArray); static cell_t Native_json_array_clear(IPluginContext *pContext, const cell_t *params) { HandleError err; HandleSecurity sec; sec.pOwner = NULL; sec.pIdentity = myself->GetIdentity(); // Param 1: hArray json_t *object; Handle_t hndlObject = static_cast<Handle_t>(params[1]); if ((err=g_pHandleSys->ReadHandle(hndlObject, htJanssonObject, &sec, (void **)&object)) != HandleError_None) { return pContext->ThrowNativeError("Invalid <Array> handle %x (error %d)", hndlObject, err); } return (json_array_clear(object) == 0); }
/* Exercise json_array_clear(): append twenty elements (ten 5s followed by
 * ten 7s), check the size, clear the array, and check it is empty again. */
static void test_clear(void)
{
    json_t *array, *five, *seven;
    int i;

    array = json_array();
    five = json_integer(5);
    seven = json_integer(7);

    if (!array)
        fail("unable to create array");
    if (!five || !seven)
        fail("unable to create integer");

    /* first ten appends store five, the remaining ten store seven */
    for (i = 0; i < 20; i++) {
        if (json_array_append(array, i < 10 ? five : seven))
            fail("unable to append");
    }

    if (json_array_size(array) != 20)
        fail("array size is invalid after appending");

    if (json_array_clear(array))
        fail("unable to clear");

    if (json_array_size(array) != 0)
        fail("array size is invalid after clearing");

    json_decref(five);
    json_decref(seven);
    json_decref(array);
}
void kafka_cache_purge(struct chained_cache *queue[], int index) { struct pkt_primitives *data = NULL; struct pkt_bgp_primitives *pbgp = NULL; struct pkt_nat_primitives *pnat = NULL; struct pkt_mpls_primitives *pmpls = NULL; char *pcust = NULL; struct pkt_vlen_hdr_primitives *pvlen = NULL; struct pkt_bgp_primitives empty_pbgp; struct pkt_nat_primitives empty_pnat; struct pkt_mpls_primitives empty_pmpls; char *empty_pcust = NULL; char src_mac[18], dst_mac[18], src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN], ip_address[INET6_ADDRSTRLEN]; char rd_str[SRVBUFLEN], misc_str[SRVBUFLEN], dyn_kafka_topic[SRVBUFLEN], *orig_kafka_topic = NULL; int i, j, stop, batch_idx, is_topic_dyn = FALSE, qn = 0, ret, saved_index = index; int mv_num = 0, mv_num_save = 0; time_t start, duration; pid_t writer_pid = getpid(); #ifdef WITH_JANSSON json_t *array = json_array(); #endif p_kafka_init_host(&kafkap_kafka_host); /* setting some defaults */ if (!config.sql_host) config.sql_host = default_kafka_broker_host; if (!config.kafka_broker_port) config.kafka_broker_port = default_kafka_broker_port; if (!config.sql_table) config.sql_table = default_kafka_topic; else { if (strchr(config.sql_table, '$')) { is_topic_dyn = TRUE; orig_kafka_topic = config.sql_table; config.sql_table = dyn_kafka_topic; } } if (config.amqp_routing_key_rr) { orig_kafka_topic = config.sql_table; config.sql_table = dyn_kafka_topic; } p_kafka_init_topic_rr(&kafkap_kafka_host); p_kafka_set_topic_rr(&kafkap_kafka_host, config.amqp_routing_key_rr); empty_pcust = malloc(config.cpptrs.len); if (!empty_pcust) { Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() empty_pcust. 
Exiting.\n", config.name, config.type); exit_plugin(1); } memset(&empty_pbgp, 0, sizeof(struct pkt_bgp_primitives)); memset(&empty_pnat, 0, sizeof(struct pkt_nat_primitives)); memset(&empty_pmpls, 0, sizeof(struct pkt_mpls_primitives)); memset(empty_pcust, 0, config.cpptrs.len); p_kafka_connect_to_produce(&kafkap_kafka_host); p_kafka_set_broker(&kafkap_kafka_host, config.sql_host, config.kafka_broker_port); p_kafka_set_topic(&kafkap_kafka_host, config.sql_table); p_kafka_set_partition(&kafkap_kafka_host, config.kafka_partition); p_kafka_set_key(&kafkap_kafka_host, config.kafka_partition_key, config.kafka_partition_keylen); p_kafka_set_content_type(&kafkap_kafka_host, PM_KAFKA_CNT_TYPE_STR); for (j = 0, stop = 0; (!stop) && P_preprocess_funcs[j]; j++) stop = P_preprocess_funcs[j](queue, &index, j); Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - START (PID: %u) ***\n", config.name, config.type, writer_pid); start = time(NULL); if (config.print_markers) { void *json_obj; char *json_str; json_obj = compose_purge_init_json(writer_pid); if (json_obj) json_str = compose_json_str(json_obj); if (json_str) { Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str); ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str)); free(json_str); json_str = NULL; } } for (j = 0; j < index; j++) { void *json_obj; char *json_str; if (queue[j]->valid != PRINT_CACHE_COMMITTED) continue; data = &queue[j]->primitives; if (queue[j]->pbgp) pbgp = queue[j]->pbgp; else pbgp = &empty_pbgp; if (queue[j]->pnat) pnat = queue[j]->pnat; else pnat = &empty_pnat; if (queue[j]->pmpls) pmpls = queue[j]->pmpls; else pmpls = &empty_pmpls; if (queue[j]->pcust) pcust = queue[j]->pcust; else pcust = empty_pcust; if (queue[j]->pvlen) pvlen = queue[j]->pvlen; else pvlen = NULL; if (queue[j]->valid == PRINT_CACHE_FREE) continue; json_obj = compose_json(config.what_to_count, config.what_to_count_2, queue[j]->flow_type, &queue[j]->primitives, pbgp, pnat, pmpls, pcust, 
pvlen, queue[j]->bytes_counter, queue[j]->packet_counter, queue[j]->flow_counter, queue[j]->tcp_flags, &queue[j]->basetime, queue[j]->stitch); json_str = compose_json_str(json_obj); #ifdef WITH_JANSSON if (json_str && config.sql_multi_values) { json_t *elem = NULL; char *tmp_str = json_str; int do_free = FALSE; if (json_array_size(array) >= config.sql_multi_values) { json_str = json_dumps(array, 0); json_array_clear(array); mv_num_save = mv_num; mv_num = 0; } else do_free = TRUE; elem = json_loads(tmp_str, 0, NULL); json_array_append_new(array, elem); mv_num++; if (do_free) { free(json_str); json_str = NULL; } } #endif if (json_str) { if (is_topic_dyn) { P_handle_table_dyn_strings(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, queue[j]); p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic); } if (config.amqp_routing_key_rr) { P_handle_table_dyn_rr(dyn_kafka_topic, SRVBUFLEN, orig_kafka_topic, &kafkap_kafka_host.topic_rr); p_kafka_set_topic(&kafkap_kafka_host, dyn_kafka_topic); } Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str); ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str)); free(json_str); json_str = NULL; if (!ret) { if (!config.sql_multi_values) qn++; else qn += mv_num_save; } else break; } } #ifdef WITH_JANSSON if (config.sql_multi_values && json_array_size(array)) { char *json_str; json_str = json_dumps(array, 0); json_array_clear(array); json_decref(array); if (json_str) { /* no handling of dyn routing keys here: not compatible */ Log(LOG_DEBUG, "DEBUG ( %s/%s ): %s\n\n", config.name, config.type, json_str); ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str)); free(json_str); json_str = NULL; if (!ret) qn += mv_num; } } #endif duration = time(NULL)-start; if (config.print_markers) { void *json_obj; char *json_str; json_obj = compose_purge_close_json(writer_pid, qn, saved_index, duration); if (json_obj) json_str = compose_json_str(json_obj); if (json_str) { Log(LOG_DEBUG, "DEBUG ( 
%s/%s ): %s\n\n", config.name, config.type, json_str); ret = p_kafka_produce_data(&kafkap_kafka_host, json_str, strlen(json_str)); free(json_str); json_str = NULL; } } p_kafka_close(&kafkap_kafka_host, FALSE); Log(LOG_INFO, "INFO ( %s/%s ): *** Purging cache - END (PID: %u, QN: %u/%u, ET: %u) ***\n", config.name, config.type, writer_pid, qn, saved_index, duration); if (config.sql_trigger_exec) P_trigger_exec(config.sql_trigger_exec); if (empty_pcust) free(empty_pcust); }
bool TrafficNetwork::loadFromFile(const string fileName){ int nbBands; long currentNode,otherNode; string roadName; double roadLength; double roadSpeedLimit; int metric; json_t *root; json_error_t error; root = json_load_file(fileName.c_str(), 0, &error); if(!root){ std::cout<<std::endl<<"ERROR: while opening "<<fileName<<" at line "<<error.line<<" - "<<error.text<<std::endl; return false; } if(!json_is_object(root)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - expected root to be an object"<<std::endl; json_decref(root); return false; } json_t *_metric, *_roads, *_nodes; _metric = json_object_get(root,"metric"); if(!json_is_integer(_metric)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - 'metric' field not present or wrong type"<<std::endl; json_decref(root); return false; } metric = json_integer_value(_metric); _nodes = json_object_get(root,"nodes"); if(!json_is_array(_nodes)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - 'nodes' field not present or not an array"<<std::endl; json_decref(root); return false; } size_t n = json_array_size(_nodes); nbNodes = n; nodes = NodeVec(nbNodes); json_t *nodeId,*_node, *nodeType; for(size_t i = 0; i < n; i++){ _node = json_array_get(_nodes,i); if(!json_is_object(_node)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - expected node "<<i<<" to be an object"<<std::endl; json_decref(root); return false; } nodeId = json_object_get(_node,"id"); if(!json_is_integer(nodeId)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - 'id' field of node "<<i<<" not present or wrong type"<<std::endl; json_decref(root); return false; } nodeType = json_object_get(_node,"type"); if(json_is_integer(nodeType)){ nodes[i] = new Node(json_integer_value(nodeId),json_integer_value(nodeType)); }else{ nodes[i] = new Node(json_integer_value(nodeId)); } 
} _roads = json_object_get(root,"roads"); if(!json_is_array(_roads)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - 'roads' field not present or not an array"<<std::endl; json_decref(root); return false; } n = json_array_size(_roads); json_t *_roadName,*_roadSpeedLimit,*_roadNbBands,*_roadLength,*_road,*startId,*endId; for(size_t i = 0; i < n; i++){ _road = json_array_get(_roads,i); if(!json_is_object(_road)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - expected road "<<i<<" to be an object"<<std::endl; json_decref(root); return false; } _roadName = json_object_get(_road,"name"); if(!json_is_string(_roadName)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - 'name' field of road "<<i<<" not present or wrong type"<<std::endl; json_decref(root); return false; } roadName = json_string_value(_roadName); _roadSpeedLimit = json_object_get(_road,"speedLimit"); if(!json_is_integer(_roadSpeedLimit)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - 'speedLimit' field of road "<<i<<" not present or wrong type"<<std::endl; json_decref(root); return false; } roadSpeedLimit = formatSpeedLimit(json_integer_value(_roadSpeedLimit),metric); _roadLength = json_object_get(_road,"length"); if(!json_is_real(_roadLength)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - 'length' field of road "<<i<<" not present or wrong type"<<std::endl; json_decref(root); return false; } roadLength = formatLength(json_real_value(_roadLength),metric); _roadNbBands = json_object_get(_road,"nbBands"); if(!json_is_integer(_roadNbBands)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - 'nbBands' field of road "<<i<<" not present or wrong type"<<std::endl; json_decref(root); return false; } nbBands = json_integer_value(_roadNbBands); startId = 
json_object_get(_road,"startId"); if(!json_is_integer(startId)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - 'startId' field of road "<<i<<" not present or wrong type"<<std::endl; json_decref(root); return false; } currentNode = json_integer_value(startId); endId = json_object_get(_road,"endId"); if(!json_is_integer(endId)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - 'endId' field of road "<<i<<" not present or wrong type"<<std::endl; json_decref(root); return false; } otherNode = json_integer_value(endId); addRoad(currentNode, otherNode, roadName, roadLength, roadSpeedLimit,nbBands); } //clean up json_array_clear(_nodes); json_object_clear(_road); json_array_clear(_roads); json_object_clear(root); json_decref(root); return true; }
bool TrafficNetwork::saveToFile(const string out_prefix, bool newWeek){ bool everythingOK = true; int metric = KPH; //save in KPH such that it can be used by external tool (and be human readable) std::vector<long> nIds; long crtId; char crtType; string out_fileName = (out_prefix+"_roads_stats.json"); json_t *root = NULL; if(firstSaved){ if(fileName != ""){ json_error_t error; root = json_load_file(fileName.c_str(), 0, &error); if(!root){ std::cout<<std::endl<<"WARNING: while opening "<<fileName<<" at line "<<error.line<<" - "<<error.text<<std::endl; root= NULL; } if(!json_is_object(root)){ std::cout<<std::endl<<"WARNING: input file "<<fileName<<" has not the correct structure - expected root to be an object"<<std::endl; json_decref(root); root = NULL; } if(!root){ std::cout<<"File "<<fileName<<" could not be using during saving process"<<std::endl<<"\t --> reverting to saving network based on stored data (possibility for loss of positional infos)"<<std::endl; } } if(!root){ root = json_object(); if ( root ) { json_object_set_new(root, "metric" ,json_integer(metric)); json_t *_nodes = json_array(); json_t *_roads = json_array(); for(NodeVec::iterator it= nodes.begin() ; it!=nodes.end(); ++it){ json_t * _node = json_object(); crtId = (*it)->getId(); crtType = (*it)->getType(); json_object_set_new(_node,"id",json_integer(crtId)); json_object_set_new(_node,"type",json_integer(crtType)); json_array_append_new(_nodes,_node); nIds = (*it)->getNeighborsId(); for(std::vector<long>::iterator jt = nIds.begin(); jt != nIds.end(); ++jt){ Road r = *((*it)->roadTo(*jt)); json_t * _road = json_object(); json_object_set_new(_road,"name",json_string(r.getName().c_str())); json_object_set_new(_road,"startId",json_integer(crtId)); json_object_set_new(_road,"endId",json_integer(r.getEndPoint()->getId())); json_object_set_new(_road,"speedLimit",json_integer(r.getSpeedLimit()*3.6)); //x3.6 to go from MPS to KPH json_object_set_new(_road,"length",json_real(r.getLength()/1000)); // /1000 
to go from M to K json_object_set_new(_road,"nbBands",json_integer(r.getNbBands())); json_array_append_new(_roads,_road); } } json_object_set_new(root, "nodes" ,_nodes); json_object_set_new(root, "roads" ,_roads); }else{ std::cout<<"ERROR: Could not create 'root' during saving process"<<std::endl; return false; } } }else{ json_error_t error; root = json_load_file(out_fileName.c_str(), 0, &error); if(!root){ std::cout<<std::endl<<"ERROR: while opening "<<out_fileName<<" at line "<<error.line<<" - "<<error.text<<std::endl; root= NULL; return false; } if(!json_is_object(root)){ std::cout<<std::endl<<"ERROR: input file "<<fileName<<" has not the correct structure - expected root to be an object"<<std::endl; json_decref(root); root = NULL; return false; } } json_t *roadsInfos; if(monitered){ bool first = false; if(firstSaved){ roadsInfos = json_array(); int nbRoads = json_array_size(json_object_get(root,"roads")); for(int i = 0; i < nbRoads; i++){ json_array_append_new(roadsInfos,json_object()); } json_object_set(root,"roadsInfos",roadsInfos); json_object_set_new(root,"timePrecision",json_integer(TIME_PRECISION)); json_object_set_new(root,"time_index",json_integer(0)); json_object_set_new(root,"driversCount_index",json_integer(1)); firstSaved = false; first = true; }else roadsInfos = json_object_get(root,"roadsInfos"); json_t *infos; for(NodeVec::iterator it= nodes.begin() ; it!=nodes.end(); ++it){ nIds = (*it)->getNeighborsId(); for(std::vector<long>::iterator jt = nIds.begin(); jt != nIds.end(); ++jt){ Road* r = ((*it)->roadTo(*jt)); infos = r->getMonitor()->getInfos(); if(first){ json_object_update(json_array_get(roadsInfos,r->getId()),infos); }else{ json_array_extend(json_object_get(json_array_get(roadsInfos,r->getId()),"data"),json_object_get(infos,"data")); } r->getMonitor()->resetInfos(newWeek); json_object_clear(infos); json_decref(infos); } } } //actually save if(!(json_dump_file(root,out_fileName.c_str(),JSON_COMPACT) == 0)){ 
//if(!(json_dump_file(root,out_fileName.c_str(),JSON_INDENT(2)) == 0)){ //<== to have pretty JSON file everythingOK = false; std::cout<< "Could not open file : "<<out_fileName << " to write down network "<< name <<std::endl; } if(monitered){ json_array_clear(roadsInfos); } json_object_clear(root); json_decref(root); if(newWeek){ firstSaved = true; } return everythingOK; }
int ast_json_array_clear(struct ast_json *array) { return json_array_clear((json_t *)array); }
/* Empty the given codec array. Thin passthrough to jansson's
 * json_array_clear(); returns 0 on success, -1 on error. */
int la_codec_array_clear(la_codec_value_t *array)
{
    json_t *json = (json_t *) array;

    return json_array_clear(json);
}