/*
 * Parse the "zookeeper" section of the configuration and initialize the
 * ZK handle in main_info->zk.
 *
 * Recognized keys: host (string, required), pop_watcher_timeout (int),
 * push_timeout (int, must be > 0 to ever become ZK master), sensors (object).
 * On any validation/parse error the function logs and returns without
 * touching main_info->zk.
 *
 * Fixes vs. previous version: `host` was strdup()ed and leaked on every
 * error return, and errno was tested without being cleared first, so a
 * stale errno from an earlier call could abort the parse spuriously.
 */
static void parse_zookeeper_json(struct _main_info *main_info,
				 struct _worker_info *worker_info,
				 json_object *zk_config) {
	char *host = NULL;
	int64_t pop_watcher_timeout = 0, push_timeout = 0;
	json_object *zk_sensors = NULL;

	json_object_object_foreach(zk_config, key, val) {
		/* json_object_get_int64() reports failure via errno, so it
		 * must be reset before every conversion. */
		errno = 0;
		if (0 == strcmp(key, "host")) {
			host = strdup(json_object_get_string(val));
		} else if (0 == strcmp(key, "pop_watcher_timeout")) {
			pop_watcher_timeout = json_object_get_int64(val);
		} else if (0 == strcmp(key, "push_timeout")) {
			push_timeout = json_object_get_int64(val);
		} else if (0 == strcmp(key, "sensors")) {
			zk_sensors = val;
		} else {
			rdlog(LOG_ERR,
			      "Don't know what zookeeper config.%s "
			      "key means.",
			      key);
		}

		if (errno != 0) {
			rdlog(LOG_ERR,
			      "Could not parse %s value: %s",
			      key,
			      strerror(errno));
			free(host); /* don't leak a previously parsed host */
			return;
		}
	}

	if (!host) {
		rdlog(LOG_ERR, "No zookeeper host specified. Can't use ZK.");
		return;
	} else if (0 == push_timeout) {
		rdlog(LOG_INFO,
		      "No pop push_timeout specified. We will never "
		      "be ZK masters.");
		free(host);
		return;
	} else if (push_timeout < 0) {
		rdlog(LOG_ERR,
		      "Can't set a zk push timeout < 0 (%" PRId64 ")",
		      push_timeout);
		free(host);
		return;
	} else if (pop_watcher_timeout < 0) {
		/* NOTE(review): original code only logged here and still
		 * called init_rbmon_zk() with the negative value cast to
		 * uint64_t; behavior kept — confirm whether a return was
		 * intended. */
		rdlog(LOG_ERR,
		      "Can't set a zk pop timeout < 0 (%" PRId64 ")",
		      pop_watcher_timeout);
	}

	main_info->zk = init_rbmon_zk(host,
				      (uint64_t)pop_watcher_timeout,
				      (uint64_t)push_timeout,
				      zk_sensors,
				      worker_info->queue);
}
/* Decode a JSON content-header description into a CONTENTS_HEADERS bean.
 * Required keys: id (hex), hash (hex, 16 bytes), size, chunk-method,
 * mime-type; ctime/mtime are optional. On success *pbean receives the
 * bean (caller owns it); on failure *pbean stays NULL and an error is
 * returned. */
GError*
m2v2_json_load_single_header (struct json_object *j, gpointer *pbean)
{
	struct json_object *jid, *jhash, *jsize, *jctime, *jmtime, *jmethod, *jtype;
	struct oio_ext_json_mapping_s mapping[] = {
		{"id",           &jid,     json_type_string, 1},
		{"hash",         &jhash,   json_type_string, 1},
		{"size",         &jsize,   json_type_int,    1},
		{"ctime",        &jctime,  json_type_int,    0},
		{"mtime",        &jmtime,  json_type_int,    0},
		{"chunk-method", &jmethod, json_type_string, 1},
		{"mime-type",    &jtype,   json_type_string, 1},
		{NULL, NULL, 0, 0}
	};

	*pbean = NULL;
	GError *err = oio_ext_extract_json (j, mapping);
	if (err != NULL)
		return err;

	GByteArray *id = metautils_gba_from_hexstring(json_object_get_string(jid));
	GByteArray *hash = NULL;
	struct bean_CONTENTS_HEADERS_s *header = NULL;

	if (!id) {
		err = NEWERROR(CODE_BAD_REQUEST, "Invalid header, not hexa id");
		goto exit;
	}
	hash = metautils_gba_from_hexstring(json_object_get_string(jhash));
	if (!hash || hash->len != 16) {
		err = NEWERROR(CODE_BAD_REQUEST, "Invalid header, not hexa16 hash");
		goto exit;
	}

	header = _bean_create (&descr_struct_CONTENTS_HEADERS);
	CONTENTS_HEADERS_set2_id (header, id->data, id->len);
	CONTENTS_HEADERS_set2_hash (header, hash->data, hash->len);
	CONTENTS_HEADERS_set_size (header, json_object_get_int64(jsize));
	if (jctime)
		CONTENTS_HEADERS_set_ctime (header, json_object_get_int64(jctime));
	if (jmtime)
		CONTENTS_HEADERS_set_mtime (header, json_object_get_int64(jmtime));
	CONTENTS_HEADERS_set2_chunk_method (header, json_object_get_string(jmethod));
	CONTENTS_HEADERS_set2_mime_type (header, json_object_get_string(jtype));

	/* hand the bean over to the caller */
	*pbean = header;
	header = NULL;

exit:
	metautils_gba_unref (id);
	metautils_gba_unref (hash);
	_bean_clean (header);
	return err;
}
/* Decode a JSON chunk description into a CHUNKS bean.
 * Required keys: id (chunk URL), hash (hex), size, content (hex header
 * id), pos; ctime is optional (defaults to "now").
 *
 * Fixes vs. previous version: the "content" field (jcontent) was
 * extracted but never used — the bean's content/header id was wrongly
 * parsed from the chunk "id" field instead — and both error messages
 * were copy-pasted from the header loader. */
GError*
m2v2_json_load_single_chunk (struct json_object *j, gpointer *pbean)
{
	GError *err = NULL;
	GByteArray *hid = NULL, *hash = NULL;
	struct bean_CHUNKS_s *chunk = NULL;
	struct json_object *jid, *jcontent, *jhash, *jsize, *jctime, *jpos;
	struct oio_ext_json_mapping_s mapping[] = {
		{"id",      &jid,      json_type_string, 1},
		{"hash",    &jhash,    json_type_string, 1},
		{"size",    &jsize,    json_type_int,    1},
		{"ctime",   &jctime,   json_type_int,    0},
		{"content", &jcontent, json_type_string, 1},
		{"pos",     &jpos,     json_type_string, 1},
		{NULL, NULL, 0, 0}
	};

	*pbean = NULL;
	if (NULL != (err = oio_ext_extract_json (j, mapping)))
		return err;

	/* The content/header id is carried by the "content" field. */
	hid = metautils_gba_from_hexstring(json_object_get_string(jcontent));
	if (!hid) {
		err = NEWERROR(CODE_BAD_REQUEST, "Invalid chunk, not hexa content id");
		goto exit;
	}
	hash = metautils_gba_from_hexstring(json_object_get_string(jhash));
	if (!hash) {
		err = NEWERROR(CODE_BAD_REQUEST, "Invalid chunk, not hexa hash");
		goto exit;
	}

	chunk = _bean_create (&descr_struct_CHUNKS);
	CHUNKS_set2_id (chunk, json_object_get_string(jid));
	CHUNKS_set_hash (chunk, hash);
	CHUNKS_set_size (chunk, json_object_get_int64(jsize));
	/* absent ctime defaults to the current wall-clock time (seconds) */
	CHUNKS_set_ctime (chunk, !jctime ? g_get_real_time() / G_TIME_SPAN_SECOND
	                                 : json_object_get_int64(jctime));
	CHUNKS_set_content (chunk, hid);
	CHUNKS_set2_position (chunk, json_object_get_string (jpos));

	*pbean = chunk;
	chunk = NULL;

exit:
	metautils_gba_unref (hid);
	metautils_gba_unref (hash);
	_bean_clean (chunk);
	return err;
}
/* Look up the RVA of 'function' in the "$FUNCTIONS" section of a Rekall
 * profile JSON file. Stores the value in *rva on success.
 * Returns VMI_SUCCESS or VMI_FAILURE.
 *
 * Fixes vs. previous version: json_object_object_get_ex() returns
 * BORROWED references, so putting 'functions'/'jsymbol' corrupted their
 * refcounts (potential premature free / double free); meanwhile 'root'
 * — the only reference actually owned here — was never released. */
status_t drakvuf_get_function_rva(const char *rekall_profile,
                                  const char *function,
                                  addr_t *rva)
{
    status_t ret = VMI_FAILURE;

    json_object *root = json_object_from_file(rekall_profile);
    if (!root) {
        fprintf(stderr, "Rekall profile couldn't be opened!\n");
        return VMI_FAILURE;
    }

    json_object *functions = NULL, *jsymbol = NULL;
    if (!json_object_object_get_ex(root, "$FUNCTIONS", &functions)) {
        PRINT_DEBUG("Rekall profile: no $FUNCTIONS section found\n");
        goto exit;
    }
    if (!json_object_object_get_ex(functions, function, &jsymbol)) {
        PRINT_DEBUG("Rekall profile: no '%s' found\n", function);
        goto exit;
    }

    *rva = json_object_get_int64(jsymbol);
    ret = VMI_SUCCESS;

exit:
    /* 'functions' and 'jsymbol' are borrowed from root: only root is put. */
    json_object_put(root);
    return ret;
}
/**
 * Get value.
 * @param  nDefault Default value.
 * @return Long value.
 *
 * @since GDAL 2.3
 */
GInt64 CPLJSONObject::ToLong(GInt64 nDefault) const
{
    // Guard-clause form: fall back to the default when no underlying
    // json object is attached.
    if( m_poJsonObject == nullptr
        /*|| json_object_get_type( TO_JSONOBJ(m_poJsonObject) ) != json_type_int*/ )
        return nDefault;

    return static_cast<GInt64>(
        json_object_get_int64( TO_JSONOBJ(m_poJsonObject) ) );
}
/* Restore the packet-size histogram from a serialized JSON object.
 * Each bucket is read from its key; a missing key resets that bucket to
 * 0 (same field-by-field behavior as before). A NULL object leaves all
 * counters untouched.
 *
 * The ten identical lookup/else-0 stanzas are collapsed into one local
 * helper to remove the copy-paste surface. */
void PacketStats::deserialize(json_object *o) {
  if(!o) return;

  /* Fetch a bucket counter by key, defaulting to 0 when absent. */
  auto read_counter = [o](const char *key) -> int64_t {
    json_object *obj;
    return json_object_object_get_ex(o, key, &obj) ? json_object_get_int64(obj) : 0;
  };

  upTo64    = read_counter("upTo64");
  upTo128   = read_counter("upTo128");
  upTo256   = read_counter("upTo256");
  upTo512   = read_counter("upTo512");
  upTo1024  = read_counter("upTo1024");
  upTo1518  = read_counter("upTo1518");
  upTo2500  = read_counter("upTo2500");
  upTo6500  = read_counter("upTo6500");
  upTo9000  = read_counter("upTo9000");
  above9000 = read_counter("above9000");
}
/* Fetch OHLC candles for 'market' between starttime and endtime at the
 * given interval and rebuild base->swieczki from the JSON response.
 * Returns true on a successful fetch, false otherwise. The curl mutex
 * is held for the duration of the request and parse.
 *
 * Fix: iterate with the public json-c array API instead of reaching into
 * struct array_list internals (arr->length), which json-c deprecates. */
bool Bitmaszyna::marketChart(string market, long starttime, long endtime, long interval) {
    struct json_object *json, *jtmp2, *jtmp3;
    int i, n;

    curl_mutex.lock();
    if (getTickerData(market+"/chartData.json?starttime="+to_stringl(starttime).toStdString()+"&endtime="+to_stringl(endtime).toStdString()+"&interval="+to_stringl(interval).toStdString(), &json)) {
        base->swieczki.clear();
        n = (int)json_object_array_length(json);
        for(i = 0; i < n; i++) {
            Ohlc s;

            jtmp2 = json_object_array_get_idx(json, i);
            json_object_object_get_ex(jtmp2, "time", &jtmp3);
            s.t = json_object_get_int64(jtmp3);
            json_object_object_get_ex(jtmp2, "open", &jtmp3);
            s.o = json_object_get_double(jtmp3);
            json_object_object_get_ex(jtmp2, "close", &jtmp3);
            s.c = json_object_get_double(jtmp3);
            json_object_object_get_ex(jtmp2, "high", &jtmp3);
            s.h = json_object_get_double(jtmp3);
            json_object_object_get_ex(jtmp2, "low", &jtmp3);
            s.l = json_object_get_double(jtmp3);
            base->swieczki.push_back(s);
        }
        json_object_put(json);
        curl_mutex.unlock();
        return(true);
    }
    curl_mutex.unlock();
    return(false);
}
/* * 业务入口函数 * 输入: * req_json:请求串,json格式 * req_id:用于记录在日志中的id * 输出: * p_out_string:输出字符串,一般拼成json格式 * n_out_len:输出字符串的长度 * */ int SimpleFrameWorkInterface::work_core(json_object *req_json, char* &p_out_string, int& n_out_len, int64_t req_id){ int spend_msec = -1; struct timeval tv_temp; tv_temp = calc_spend_time(tv_temp, "example_work start", spend_msec, true); int result = 0; json_object* cmd_json = json_object_object_get(req_json, "cmd"); char* cmd_str = NULL; if(NULL == cmd_json){ cmd_str = "query"; }else{ cmd_str = (char*)json_object_get_string(cmd_json); } if(strcasecmp(cmd_str, "query") != 0){ return return_fail(403, "command not support", p_out_string, n_out_len); } json_object* uid_json = json_object_object_get(req_json, "uid"); if(NULL == uid_json){ return return_fail(403, "uid is empty", p_out_string, n_out_len); }else{ uint64_t uid = json_object_get_int64(uid_json); result = get_uid_good_follow(uid, req_id, p_out_string, n_out_len); } tv_temp = calc_spend_time(tv_temp, "example_work all finish", spend_msec); return result; }
/* Fetch the latest trades for 'market' and rebuild the 'trades' vector
 * from the JSON response. Returns true on a successful fetch.
 *
 * Fixes vs. previous version:
 *  - offer.id was read from the "type" key (copy-paste bug), so every
 *    trade id duplicated the trade type; it now reads "id"
 *    (TODO confirm field name against the Bitmaszyna API).
 *  - iterate with the public json-c array API instead of poking
 *    struct array_list internals (arr->length). */
bool Bitmaszyna::lasttrades(string market) {
    struct json_object *json, *jtmp, *jtmp2;
    int i, n;

    curl_mutex.lock();
    trades.clear();
    if (getTickerData(market+"/transactions.json", &json)) {
        n = (int)json_object_array_length(json);
        for(i = 0; i < n; i++) {
            Offer offer;

            jtmp = json_object_array_get_idx(json, i);
            json_object_object_get_ex(jtmp, "date", &jtmp2);
            offer.time = json_object_get_int64(jtmp2);
            json_object_object_get_ex(jtmp, "price", &jtmp2);
            offer.price = json_object_get_double(jtmp2);
            json_object_object_get_ex(jtmp, "amount", &jtmp2);
            offer.amount = json_object_get_double(jtmp2);
            json_object_object_get_ex(jtmp, "type", &jtmp2);
            offer.type = json_object_get_int(jtmp2);
            json_object_object_get_ex(jtmp, "id", &jtmp2);
            offer.id = json_object_get_int(jtmp2);
            trades.push_back(offer);
        }
        json_object_put(json);
        curl_mutex.unlock();
        return(true);
    }
    curl_mutex.unlock();
    return(false);
}
/* Convert a json object into the equivalent Lua value and push it on
 * the Lua stack. Always pushes exactly ONE value and returns 1, so the
 * stack stays balanced for every input.
 *
 * Fixes vs. previous version: a NULL object pushed nil but then fell
 * through into the switch, and a real JSON null pushed NOTHING while
 * still reporting one pushed value — unbalancing the Lua stack. */
int json_object_to_lua (lua_State *L, json_object *o)
{
    if (o == NULL) {
        lua_pushnil (L);
        return (1);
    }
    switch (json_object_get_type (o)) {
        case json_type_object:
            json_object_to_lua_table (L, o);
            break;
        case json_type_array:
            json_array_to_lua (L, o);
            break;
        case json_type_string:
            lua_pushstring (L, json_object_get_string (o));
            break;
        case json_type_int:
            lua_pushinteger (L, json_object_get_int64 (o));
            break;
        case json_type_double:
            lua_pushnumber (L, json_object_get_double (o));
            break;
        case json_type_boolean:
            lua_pushboolean (L, json_object_get_boolean (o));
            break;
        case json_type_null:
            /* JSON null maps to Lua nil (previously nothing was pushed) */
            lua_pushnil (L);
            break;
    }
    return (1);
}
/* Extract the integer array stored under 'key' in 'jobj' as an array of
 * uint32_t nids. Returns an xmalloc()ed array (caller frees) and stores
 * the number of successfully parsed leading entries in *num. Returns
 * NULL when the key is absent. Parsing stops at the first element that
 * is not an integer. */
static uint32_t *_json_parse_nids(json_object *jobj, char *key, int *num)
{
	json_object *j_array = NULL;

	*num = 0;
	json_object_object_get_ex(jobj, key, &j_array);
	if (!j_array) {
		debug("%s: key=%s not found in nid specification",
		      prog_name, key);
		return NULL;
	}

	int count = json_object_array_length(j_array);
	uint32_t *nids = xmalloc(sizeof(uint32_t) * count);

	for (int idx = 0; idx < count; idx++) {
		json_object *element = json_object_array_get_idx(j_array, idx);
		if (json_object_get_type(element) != json_type_int) {
			error("%s: Unable to parse nid specification",
			      prog_name);
			break;
		}
		nids[idx] = (uint32_t) json_object_get_int64(element);
		*num = idx + 1;
	}
	return nids;
}
/* Construct a Planet Labs Scenes layer.
 *
 * Builds the feature definition from the static apsAttrs table, sets a
 * WGS84 SRS on the (multipolygon) geometry field, and initializes the
 * paging/filter state. When poObjCount10 (the first result page of a
 * "count=10" request) is provided, it is used to (a) seed nFeatureCount
 * from its "count" member and (b) discover additional attribute fields
 * by loading the page through the GeoJSON reader and merging any field
 * names not already present (sorted with the project comparator). */
OGRPLScenesLayer::OGRPLScenesLayer(OGRPLScenesDataset* poDS,
                                   const char* pszName,
                                   const char* pszBaseURL,
                                   json_object* poObjCount10)
{
    this->poDS = poDS;
    osBaseURL = pszBaseURL;
    SetDescription(pszName);
    poFeatureDefn = new OGRFeatureDefn(pszName);
    poFeatureDefn->SetGeomType(wkbMultiPolygon);
    /* Static, always-present attributes first. */
    for(int i = 0; i < (int)sizeof(apsAttrs) / (int)sizeof(apsAttrs[0]); i++)
    {
        OGRFieldDefn oField(apsAttrs[i].pszName, apsAttrs[i].eType);
        poFeatureDefn->AddFieldDefn(&oField);
    }
    poFeatureDefn->Reference();
    poSRS = new OGRSpatialReference(SRS_WKT_WGS84);
    poFeatureDefn->GetGeomFieldDefn(0)->SetSpatialRef(poSRS);
    bEOF = FALSE;
    nFeatureCount = -1;   /* unknown until a count is seen */
    nNextFID = 1;
    poGeoJSONDS = NULL;
    poGeoJSONLayer = NULL;
    poMainFilter = NULL;
    nPageSize = atoi(CPLGetConfigOption("PLSCENES_PAGE_SIZE", "1000"));
    bStillInFirstPage = FALSE;
    bAcquiredAscending = -1;
    bFilterMustBeClientSideEvaluated = FALSE;
    ResetReading();

    if( poObjCount10 != NULL )
    {
        /* Seed the total feature count from the sample page. */
        json_object* poCount = json_object_object_get(poObjCount10, "count");
        if( poCount != NULL )
            nFeatureCount = MAX(0, json_object_get_int64(poCount));

        /* Load the sample page through the GeoJSON reader to discover
         * attribute fields beyond the static apsAttrs set. Nested
         * attributes are flattened with '.' separators. */
        OGRGeoJSONDataSource* poTmpDS = new OGRGeoJSONDataSource();
        OGRGeoJSONReader oReader;
        oReader.SetFlattenNestedAttributes(true, '.');
        oReader.ReadLayer( poTmpDS, "layer", poObjCount10 );
        OGRLayer* poTmpLayer = poTmpDS->GetLayer(0);
        if( poTmpLayer )
        {
            OGRFeatureDefn* poTmpFDefn = poTmpLayer->GetLayerDefn();
            std::vector<CPLString> aosNewFields;
            /* Collect only the names we don't already have. */
            for(int i=0;i<poTmpFDefn->GetFieldCount();i++)
            {
                if( poFeatureDefn->GetFieldIndex(poTmpFDefn->GetFieldDefn(i)->GetNameRef()) < 0 )
                    aosNewFields.push_back(poTmpFDefn->GetFieldDefn(i)->GetNameRef());
            }
            /* Deterministic field order across runs. */
            std::sort(aosNewFields.begin(), aosNewFields.end(),
                      OGRPLScenesLayerFieldNameComparator);
            for(int i=0;i<(int)aosNewFields.size();i++)
            {
                OGRFieldDefn oField(poTmpFDefn->GetFieldDefn(poTmpFDefn->GetFieldIndex(aosNewFields[i])));
                poFeatureDefn->AddFieldDefn(&oField);
            }
        }
        delete poTmpDS;
    }
}
GError * namespace_info_init_json_object(struct json_object *obj, struct namespace_info_s *ni) { EXTRA_ASSERT(ni != NULL); struct json_object *ns=NULL, *sz=NULL; struct oio_ext_json_mapping_s mapping[] = { {"ns", &ns, json_type_string, 1}, {"chunksize", &sz, json_type_int, 1}, {NULL, NULL, 0, 0} }; GError *err = oio_ext_extract_json (obj, mapping); if (err) return err; g_strlcpy(ni->name, json_object_get_string(ns), sizeof(ni->name)); ni->chunk_size = json_object_get_int64(sz); if (NULL != (err = _load_hash(obj, "options", ni->options)) || NULL != (err = _load_hash(obj, "storage_policy", ni->storage_policy)) || NULL != (err = _load_hash(obj, "data_security", ni->data_security)) || NULL != (err = _load_hash(obj, "data_treatments", ni->data_treatments)) || NULL != (err = _load_hash(obj, "storage_class", ni->storage_class))) return err; return NULL; }
GError * meta1_service_url_load_json_object(struct json_object *obj, struct meta1_service_url_s **out) { EXTRA_ASSERT(out != NULL); *out = NULL; struct json_object *s=NULL, *t=NULL, *h=NULL, *a=NULL; struct oio_ext_json_mapping_s mapping[] = { {"seq", &s, json_type_int, 1}, {"type", &t, json_type_string, 1}, {"host", &h, json_type_string, 1}, {"args", &a, json_type_string, 1}, {NULL, NULL, 0, 0} }; GError *err = oio_ext_extract_json (obj, mapping); if (err) return err; struct meta1_service_url_s *m1u; size_t argslen = strlen(json_object_get_string(a)); m1u = g_malloc0(sizeof(struct meta1_service_url_s) + 1 + argslen), m1u->seq = json_object_get_int64(s); g_strlcpy(m1u->srvtype, json_object_get_string(t), sizeof(m1u->srvtype)); g_strlcpy(m1u->host, json_object_get_string(h), sizeof(m1u->host)); g_strlcpy(m1u->args, json_object_get_string(a), argslen+1); *out = m1u; return NULL; }
/* Build a GError from a JSON error body of the form
 * {"status": <int>, "message": <string>} — both fields optional.
 * Unparseable bodies yield a generic "No error explained" error. */
static GError *
_body_parse_error (GString *b)
{
	g_assert (b != NULL);

	struct json_tokener *parser = json_tokener_new ();
	struct json_object *jbody = json_tokener_parse_ex (parser, b->str, b->len);
	json_tokener_free (parser);

	if (jbody == NULL)
		return NEWERROR(0, "No error explained");

	struct json_object *jcode = NULL, *jmsg = NULL;
	struct oio_ext_json_mapping_s map[] = {
		{"status",  &jcode, json_type_int,    0},
		{"message", &jmsg,  json_type_string, 0},
		{NULL, NULL, 0, 0}
	};

	GError *err = oio_ext_extract_json(jbody, map);
	if (err == NULL) {
		/* Fall back to defaults for whichever field is missing. */
		int code = jcode ? (int) json_object_get_int64 (jcode) : 0;
		const char *msg = jmsg ? json_object_get_string (jmsg)
		                       : "Unknown error";
		err = NEWERROR(code, "(code=%d) %s", code, msg);
	}
	json_object_put (jbody);
	return err;
}
/* Handle a Munin metadata event: extract the session start (boot) time
 * from the event's JSON blob and insert a monitor row into the sqlite
 * database via the prepared mws->insert_monitor statement.
 * Returns RETVAL_SUCCESS or RETVAL_FAILURE. */
uint8_t md_sqlite_handle_munin_event(struct md_writer_sqlite *mws,
                                     struct md_munin_event *mme)
{
    json_object *value;
    int64_t boottime = 0;
    json_object *session_obj;

    /* The blob must contain a "session" object with a "start" member. */
    if (!json_object_object_get_ex(mme->json_blob, "session", &session_obj)) {
        META_PRINT_SYSLOG(mws->parent, LOG_ERR, "Failed to read data from session module (Munin)\n");
        return RETVAL_FAILURE;
    }

    if (!json_object_object_get_ex(session_obj, "start", &value))
        return RETVAL_FAILURE;

    /* Sanity check: 1400000000 is ~May 2014 (epoch seconds); anything
     * earlier is treated as a bogus start timestamp. */
    if ((boottime = json_object_get_int64(value)) < 1400000000 ) {
        META_PRINT_SYSLOG(mws->parent, LOG_ERR, "Failed to read valid start time from session module (Munin): %" PRId64 "\n", boottime);
        return RETVAL_FAILURE;
    }

    /* Reuse the prepared statement: clear old bindings then reset it. */
    sqlite3_stmt *stmt = mws->insert_monitor;
    sqlite3_clear_bindings(stmt);
    sqlite3_reset(stmt);

    /* Bind (node_id, tstamp, sequence, boottime); sqlite3_bind_* returns
     * non-zero on error, so any truthy result aborts.
     * NOTE(review): tstamp/sequence are bound with sqlite3_bind_int —
     * confirm they fit in 32 bits. */
    if (sqlite3_bind_int(stmt, 1, mws->node_id) ||
        sqlite3_bind_int(stmt, 2, mme->tstamp) ||
        sqlite3_bind_int(stmt, 3, mme->sequence) ||
        sqlite3_bind_int64(stmt, 4, boottime) ){
        META_PRINT_SYSLOG(mws->parent, LOG_ERR, "Failed to bind values to INSERT query (Monitor)\n");
        return RETVAL_FAILURE;
    }

    if (sqlite3_step(stmt) != SQLITE_DONE)
        return RETVAL_FAILURE;
    else
        return RETVAL_SUCCESS;
}
/* Derive request["allocated_memory"] when it is not already present:
 * allocated_memory = allocated_bytes + 40 * allocated_objects
 * (40 bytes per object slot, assuming 64-bit ruby). Requires both
 * "allocated_objects" and "allocated_bytes" to be present; otherwise
 * the request is left untouched. */
static void processor_setup_allocated_memory(processor_state_t *self, json_object *request)
{
    json_object *memory_obj, *objects_obj, *bytes_obj;

    /* Already provided: nothing to derive. */
    if (json_object_object_get_ex(request, "allocated_memory", &memory_obj))
        return;
    if (!json_object_object_get_ex(request, "allocated_objects", &objects_obj))
        return;
    if (!json_object_object_get_ex(request, "allocated_bytes", &bytes_obj))
        return;

    long objects = json_object_get_int64(objects_obj);
    long bytes = json_object_get_int64(bytes_obj);
    // assume 64bit ruby
    long memory = bytes + objects * 40;
    json_object_object_add(request, "allocated_memory", json_object_new_int64(memory));
    // printf("[D] allocated memory: %lu\n", memory);
}
/* Decode a JSON alias description into an ALIASES bean.
 * Required keys: name, ver, header (hex); ctime/mtime/deleted are
 * optional. On success *pbean receives the bean (caller owns it).
 *
 * Consistency fix: the optional fields (marked 0 in the mapping) were
 * read unconditionally; when absent their json objects are NULL and the
 * bean was set from json-c's implicit NULL->0 behavior. They are now
 * guarded like in the sibling header/chunk loaders, leaving the bean's
 * defaults (assumed zeroed by _bean_create — confirm) untouched. */
GError*
m2v2_json_load_single_alias (struct json_object *j, gpointer *pbean)
{
	GError *err = NULL;
	GByteArray *hid = NULL;
	struct bean_ALIASES_s *alias = NULL;
	struct json_object *jname, *jversion, *jctime, *jmtime, *jheader, *jdel;
	struct oio_ext_json_mapping_s m[] = {
		{"name",    &jname,    json_type_string,  1},
		{"ver",     &jversion, json_type_int,     1},
		{"header",  &jheader,  json_type_string,  1},
		{"ctime",   &jctime,   json_type_int,     0},
		{"mtime",   &jmtime,   json_type_int,     0},
		{"deleted", &jdel,     json_type_boolean, 0},
		{NULL, NULL, 0, 0}
	};

	*pbean = NULL;
	if (NULL != (err = oio_ext_extract_json(j, m)))
		goto exit;

	hid = metautils_gba_from_hexstring(json_object_get_string(jheader));
	if (!hid) {
		err = NEWERROR(CODE_BAD_REQUEST, "Invalid alias, not hexadecimal header_id");
		goto exit;
	}

	alias = _bean_create (&descr_struct_ALIASES);
	ALIASES_set2_alias (alias, json_object_get_string(jname));
	ALIASES_set_version (alias, json_object_get_int64(jversion));
	ALIASES_set2_content (alias, hid->data, hid->len);
	if (jdel)
		ALIASES_set_deleted (alias, json_object_get_boolean(jdel));
	if (jmtime)
		ALIASES_set_mtime (alias, json_object_get_int64(jmtime));
	if (jctime)
		ALIASES_set_ctime (alias, json_object_get_int64(jctime));

	*pbean = alias;
	alias = NULL;

exit:
	metautils_gba_unref (hid);
	_bean_clean (alias);
	return err;
}
/* Restore per-protocol byte/packet counters from a serialized JSON
 * object. All existing counters are freed and reset first; then one
 * ProtoCounter is allocated per protocol found in the JSON.
 *
 * Fixes vs. previous version:
 *  - the "packets" section looked up its "sent"/"rcvd" members inside
 *    the BYTES object (copy-paste bug), so packet counters silently
 *    mirrored byte counters;
 *  - the counter was malloc()ed without zeroing, leaving any field
 *    whose key was absent uninitialized (now calloc()ed). */
void NdpiStats::deserialize(NetworkInterface *iface, json_object *o) {
  if(!o) return;

  /* Reset all */
  for(int i=0; i<MAX_NDPI_PROTOS; i++)
    if(counters[i] != NULL)
      free(counters[i]);
  memset(counters, 0, sizeof(counters));

  for(int proto_id=0; proto_id<MAX_NDPI_PROTOS; proto_id++) {
    char *name = iface->get_ndpi_proto_name(proto_id);
    if(name == NULL) continue;

    json_object *obj;
    if(!json_object_object_get_ex(o, name, &obj)) continue;

    if((counters[proto_id] = (ProtoCounter*)calloc(1, sizeof(ProtoCounter))) == NULL)
      continue; /* out of memory: skip this protocol */

    json_object *bytes, *packets;

    if(json_object_object_get_ex(obj, "bytes", &bytes)) {
      json_object *sent, *rcvd;

      if(json_object_object_get_ex(bytes, "sent", &sent))
        counters[proto_id]->bytes.sent = json_object_get_int64(sent);
      if(json_object_object_get_ex(bytes, "rcvd", &rcvd))
        counters[proto_id]->bytes.rcvd = json_object_get_int64(rcvd);
    }

    if(json_object_object_get_ex(obj, "packets", &packets)) {
      json_object *sent, *rcvd;

      /* read from 'packets' here (was 'bytes' before the fix) */
      if(json_object_object_get_ex(packets, "sent", &sent))
        counters[proto_id]->packets.sent = json_object_get_int64(sent);
      if(json_object_object_get_ex(packets, "rcvd", &rcvd))
        counters[proto_id]->packets.rcvd = json_object_get_int64(rcvd);
    }
  }
}
/* Return the value stored under 'key' in 'jobj' as an unsigned int,
 * or 0 when the key is absent.
 *
 * Fix: json_object_object_get() returns a BORROWED reference owned by
 * jobj; the old json_object_put(tmp) decremented a refcount we never
 * owned and could free the value (and later jobj's children) early. */
unsigned int get_json_uint(struct json_object *jobj, const char *key)
{
	struct json_object *tmp = json_object_object_get(jobj, key);

	if (!tmp)
		return 0;

	return (unsigned int)json_object_get_int64(tmp);
}
/* Return the value stored under 'key' in 'jobj' as a size_t, or 0 when
 * the key is absent.
 *
 * Fix: json_object_object_get() returns a BORROWED reference owned by
 * jobj; the old json_object_put(tmp) decremented a refcount we never
 * owned and could free the value prematurely. */
size_t get_json_size_t(struct json_object *jobj, const char *key)
{
	struct json_object *tmp = json_object_object_get(jobj, key);

	if (!tmp)
		return 0;

	return (size_t)json_object_get_int64(tmp);
}
/* Parse the JSON setup response and return its "sessionId" value.
 * Returns 0 (and prints a diagnostic) when the string cannot be parsed
 * or lacks a sessionId. */
unsigned long parse_setup_response(char *response)
{
	unsigned long session_id = 0;
	struct json_object *root = json_tokener_parse(response);
	struct json_object *id_obj = NULL;

	if (root != NULL &&
	    json_object_object_get_ex(root, "sessionId", &id_obj)) {
		session_id = json_object_get_int64(id_obj);
		json_object_put(root);
	} else {
		printf("Something went wrong in processing the string %s\n",
		       response);
		if (root)
			json_object_put(root);
	}
	return session_id;
}
void NotifyingAlertParser::parseAttribute(const AlertAttribute& attribute, json_object* const & pValue) { switch (attribute) { case AlertAttribute::DESCRIPTION: { const std::string description(json_object_get_string(pValue)); for (listeners::AlertDetailsListener* const & pListener : getListeners()) { pListener->notifyDescription(description); } break; } case AlertAttribute::EXPIRY_TIME: { const time_t expiryTime( static_cast<time_t>(json_object_get_int64(pValue))); for (listeners::AlertDetailsListener* const & pListener : getListeners()) { pListener->notifyExpiryTime(expiryTime); } break; } case AlertAttribute::TITLE: { const std::string title(json_object_get_string(pValue)); for (listeners::AlertDetailsListener* const & pListener : getListeners()) { pListener->notifyTitle(title); } break; } case AlertAttribute::URI: { const std::string uri(json_object_get_string(pValue)); for (listeners::AlertDetailsListener* const & pListener : getListeners()) { pListener->notifyUri(uri); } break; } default: { throw std::logic_error( createUndefinedAttributeErrorMessage(AlertAttribute_NAME, attribute)); break; } } }
/* Convert a json-c object into an HHVM Variant. Containers recurse via
 * the dedicated array/object converters; NULL inputs, JSON nulls and
 * unimplemented types all map to a null Variant. */
Variant json_object_to_variant(json_object *new_obj, const bool assoc,
                               const bool stable_maps,
                               const bool collections) {
  if (!new_obj) {
    return Variant(Variant::NullInit());
  }

  switch (json_object_get_type(new_obj)) {
  case json_type_string:
    return Variant(String(json_object_get_string(new_obj),
                          json_object_get_string_len(new_obj),
                          CopyString));
  case json_type_double:
    return Variant(json_object_get_double(new_obj));
  case json_type_int: {
    int64_t i64 = json_object_get_int64(new_obj);
    if (i64 == INT64_MAX || i64 == INT64_MIN) {
      // php notice: integer overflow detected
    }
    return Variant(i64);
  }
  case json_type_boolean:
    return Variant(json_object_get_boolean(new_obj) ? true : false);
  case json_type_array:
    return json_type_array_to_variant(new_obj, assoc, stable_maps,
                                      collections);
  case json_type_object:
    return json_type_object_to_variant(new_obj, assoc, stable_maps,
                                       collections);
  case json_type_null:
    return Variant(Variant::NullInit());
  default:
    // warning type <type> not yet implemented
    return Variant(Variant::NullInit());
  }
}
bool OGRAmigoCloudDataSource::ListDatasets() { std::stringstream url; url << std::string(GetAPIURL()) << "/users/0/projects/" << std::string(GetProjectId()) << "/datasets/?summary"; json_object* result = RunGET(url.str().c_str()); if( result == nullptr ) { CPLError(CE_Failure, CPLE_AppDefined, "AmigoCloud:get failed."); return false; } if( result != nullptr ) { auto type = json_object_get_type(result); if(type == json_type_object) { json_object *poResults = CPL_json_object_object_get(result, "results"); if(poResults != nullptr && json_object_get_type(poResults) == json_type_array) { CPLprintf("List of available datasets for project id: %s\n", GetProjectId()); CPLprintf("| id \t | name\n"); CPLprintf("|--------|-------------------\n"); const auto nSize = json_object_array_length(poResults); for(auto i = decltype(nSize){0}; i < nSize; ++i) { json_object *ds = json_object_array_get_idx(poResults, i); if(ds!=nullptr) { const char *name = nullptr; int64_t dataset_id = 0; json_object *poName = CPL_json_object_object_get(ds, "name"); if (poName != nullptr) { name = json_object_get_string(poName); } json_object *poId = CPL_json_object_object_get(ds, "id"); if (poId != nullptr) { dataset_id = json_object_get_int64(poId); } if (name != nullptr) { std::stringstream str; str << "| " << dataset_id << "\t | " << name; CPLprintf("%s\n", str.str().c_str()); } } } } } json_object_put(result); } return true; }
//FIXME GC corresponding C object when JS object is free. ArrayContainer json_native_object_array_to_container(json_object* json_array) { int i; ArrayContainer ret; int array_length = json_object_array_length(json_array); void** array = g_new0(void*, array_length); // The C implementation should be responsible for free it. ret.num = array_length; ret.data = (void*)array; for (i = 0; i < array_length; ++i) { json_object *obj = json_object_array_get_idx(json_array, i); void* obj_ptr = (void*)json_object_get_int64(obj); array[i] = obj_ptr; } return ret; }
/* Build a Data object from a JSON description with optional "name" and
 * "size" members. Defaults: empty name, size 10.
 *
 * Fix: the name was copied with an unbounded strcpy() into name[64],
 * overflowing the stack buffer on long input; the copy is now bounded
 * and NUL-terminated.
 * NOTE(review): json_object_to_json_string() keeps the surrounding
 * quotes for string values — presumably remove_blank() (or the caller)
 * tolerates that; confirm whether json_object_get_string() was meant. */
Data* data_json_create(json_object* jso)
{
	char name[64] = {0, };
	uint64_t size = 10;

	//TODO add new value
	json_object_object_foreach(jso, key, child_object) {
		if(!strcmp(key, "name")) {
			strncpy(name, json_object_to_json_string(child_object),
					sizeof(name) - 1);
			name[sizeof(name) - 1] = '\0';
			remove_blank(name);
		} else if(!strcmp(key, "size")) {
			size = json_object_get_int64(child_object);
		}
	}
	return data_create(name, size);
}
/* Append ,"key":<int> to buf using the int64 value of 'val'.
 * When the value cannot be extracted (json-c reports it via errno),
 * logs an error and appends nothing.
 *
 * Fix: the value was printed with "%ld", which mis-formats int64_t on
 * platforms where long is 32-bit; use the PRId64 conversion instead. */
static void print_monitor_value_enrichment_int(struct printbuf *buf,
					       const char *key,
					       json_object *val) {
	errno = 0;
	const int64_t integer = json_object_get_int64(val);
	if (errno != 0) {
		char errbuf[BUFSIZ];
		/* GNU strerror_r: returns the message pointer */
		const char *errstr = strerror_r(errno, errbuf, sizeof(errbuf));
		rdlog(LOG_ERR,
		      "Cannot extract int value of enrichment key %s: %s",
		      key,
		      errstr);
	} else {
		sprintbuf(buf, ",\"%s\":%" PRId64, key, integer);
	}
}
int main(int argc, char **argv) { json_object *tmp=json_object_new_int(123); assert (json_object_get_int(tmp)==123); json_object_set_int(tmp,321); assert (json_object_get_int(tmp)==321); printf("INT PASSED\n"); json_object_set_int64(tmp,(int64_t)321321321); assert (json_object_get_int64(tmp)==321321321); json_object_put(tmp); printf("INT64 PASSED\n"); tmp=json_object_new_boolean(TRUE); assert (json_object_get_boolean(tmp)==TRUE); json_object_set_boolean(tmp,FALSE); assert (json_object_get_boolean(tmp)==FALSE); json_object_set_boolean(tmp,TRUE); assert (json_object_get_boolean(tmp)==TRUE); json_object_put(tmp); printf("BOOL PASSED\n"); tmp=json_object_new_double(12.34); assert (json_object_get_double(tmp)==12.34); json_object_set_double(tmp,34.56); assert (json_object_get_double(tmp)==34.56); json_object_set_double(tmp,6435.34); assert (json_object_get_double(tmp)==6435.34); json_object_put(tmp); printf("DOUBLE PASSED\n"); #define SHORT "SHORT" #define MID "A MID STRING" // 12345678901234567890123456789012.... #define HUGE "A string longer than 32 chars as to check non local buf codepath" tmp=json_object_new_string(SHORT); assert (strcmp(json_object_get_string(tmp),SHORT)==0); json_object_set_string(tmp,MID); assert (strcmp(json_object_get_string(tmp),MID)==0); json_object_set_string(tmp,HUGE); assert (strcmp(json_object_get_string(tmp),HUGE)==0); json_object_set_string(tmp,SHORT); assert (strcmp(json_object_get_string(tmp),SHORT)==0); json_object_put(tmp); printf("STRING PASSED\n"); printf("PASSED\n"); return 0; }
/* Restore local/remote traffic counters from a serialized JSON object
 * of the form {"bytes": {...}, "packets": {...}}. Counters whose key is
 * missing keep their current value; a NULL object is a no-op. */
void LocalTrafficStats::deserialize(json_object *o) {
  json_object *section, *item;

  if(!o) return;

  if(json_object_object_get_ex(o, "bytes", &section)) {
    if(json_object_object_get_ex(section, "local2local", &item))   bytes.local2local   = json_object_get_int64(item);
    if(json_object_object_get_ex(section, "local2remote", &item))  bytes.local2remote  = json_object_get_int64(item);
    if(json_object_object_get_ex(section, "remote2local", &item))  bytes.remote2local  = json_object_get_int64(item);
    if(json_object_object_get_ex(section, "remote2remote", &item)) bytes.remote2remote = json_object_get_int64(item);
  }

  if(json_object_object_get_ex(o, "packets", &section)) {
    if(json_object_object_get_ex(section, "local2local", &item))   packets.local2local   = json_object_get_int64(item);
    if(json_object_object_get_ex(section, "local2remote", &item))  packets.local2remote  = json_object_get_int64(item);
    if(json_object_object_get_ex(section, "remote2local", &item))  packets.remote2local  = json_object_get_int64(item);
    if(json_object_object_get_ex(section, "remote2remote", &item)) packets.remote2remote = json_object_get_int64(item);
  }
}