/*
 * Helper functions
 */

/**
 * Parse at most @length bytes of @json_area as one JSON value.
 *
 * @json_area   buffer holding JSON text (need not be NUL-terminated)
 * @length      number of bytes to parse; must be > 0
 * @end_offset  on success, receives the offset just past the parsed value;
 *              NOT written on failure, so callers must check the return
 *              value before reading it
 *
 * Returns a new json_object reference (caller puts it) or NULL on error.
 */
json_object *parse_json_len(const char *json_area, int length, int *end_offset)
{
	json_object *jobj;
	struct json_tokener *jtok;

	/* Reject a missing buffer or non-positive length up front. */
	if (!json_area || length <= 0)
		return NULL;

	jtok = json_tokener_new();
	if (!jtok) {
		log_dbg("ERROR: Failed to init json tokener");
		return NULL;
	}

	jobj = json_tokener_parse_ex(jtok, json_area, length);
	if (!jobj)
		log_dbg("ERROR: Failed to parse json data (%d): %s",
			json_tokener_get_error(jtok),
			json_tokener_error_desc(json_tokener_get_error(jtok)));
	else
		/* NOTE(review): reads json-c's internal char_offset field;
		 * newer json-c exposes json_tokener_get_parse_end() for this. */
		*end_offset = jtok->char_offset;

	json_tokener_free(jtok);
	return jobj;
}
/**
 * Parse `data` (up to `data_len` bytes) as JSON into `return_value`.
 *
 * @param return_value  receives the converted Variant on success
 * @param data          JSON text to parse
 * @param data_len      number of bytes of `data` to consume
 * @param assoc         convert JSON objects to arrays rather than objects
 * @param depth         maximum nesting depth (honored only when json-c
 *                      provides json_tokener_new_ex, i.e. minor >= 11)
 * @param options       k_JSON_FB_* option bits
 *
 * @return true on successful parse, false otherwise (a last-error code is
 *         recorded via json_set_last_error_code on failure).
 */
bool JSON_parser(Variant &return_value, const char *data, int data_len,
                 bool assoc, int depth, int64_t options) {
  json_tokener *tok;
  json_object *new_obj;
  bool retval = false;

#if JSON_C_MINOR_VERSION >= 11
  tok = json_tokener_new_ex(depth);
#else
  tok = json_tokener_new();
#endif
  if (!tok) {
    return retval;
  }
  //if (!(options & k_JSON_FB_LOOSE)) {
  //  json_tokener_set_flags(tok, JSON_TOKENER_STRICT);
  //}
  bool const stable_maps = options & k_JSON_FB_STABLE_MAPS;
  bool const collections = stable_maps || options & k_JSON_FB_COLLECTIONS;

  new_obj = json_tokener_parse_ex(tok, data, data_len);
  if (json_tokener_get_error(tok) == json_tokener_continue) {
    /* The input ended mid-value; feeding an empty chunk with length -1
     * flushes the tokener so bare literals like "123" complete. */
    new_obj = json_tokener_parse_ex(tok, "", -1);
  }

  if (new_obj) {
    return_value = json_object_to_variant(new_obj, assoc, stable_maps,
                                          collections);
    json_object_put(new_obj);
    retval = true;
  } else {
    switch (json_tokener_get_error(tok)) {
      case json_tokener_success:
        retval = true;
        break;
      case json_tokener_error_depth:
        json_set_last_error_code(json_error_codes::JSON_ERROR_DEPTH);
        break;
      default:
        json_set_last_error_code(json_error_codes::JSON_ERROR_SYNTAX,
                                 json_tokener_get_error(tok));
    }
  }
  json_tokener_free(tok);
  return retval;
}
static int jsonrpc_session_receive(struct jsonrpc_session *session, void *in, size_t len) { struct json_tokener *tok = session->tok; struct json_object *request = NULL, *response = NULL; unsigned char *reply = NULL; size_t reply_len = 0; request = json_tokener_parse_ex(tok, (const char *)in, (int)len); if (!request) { enum json_tokener_error err = json_tokener_get_error(tok); if (err == json_tokener_continue) return 0; return -1; } jsonrpc_call(session, request, &response); reply_len = LWS_PRE + strlen(json_object_get_string(response)); reply = (unsigned char *)calloc(1, reply_len + 1); strcpy((char *)(&reply[LWS_PRE]), json_object_get_string(response)); lws_write(session->wsi, &reply[LWS_PRE], reply_len - LWS_PRE, LWS_WRITE_TEXT); free(reply); json_object_put(request); json_object_put(response); return 0; }
/**
 * Parse the data retrieved by CURL into a JSON object - to be supplied to
 * curl_easy_setopt with CURLOPT_WRITEFUNCTION and called by
 * curl_easy_perform.
 *
 * @param ptr       Pointer to the retrieved (piece of) data that should be
 *                  parsed.
 * @param size      Size of each retrieved data chunk.
 * @param nmemb     Number of retrieved data chunks.
 * @param userdata  The data supplied to curl_easy_setopt as
 *                  CURLOPT_WRITEDATA, must be struct
 *                  tlog_es_reader_write_data.
 *
 * @return Number of bytes processed, signals error if different from
 *         size*nmemb, can be CURL_WRITEFUNC_PAUSE to signal transfer pause.
 */
static size_t
tlog_es_reader_write_func(char *ptr, size_t size, size_t nmemb,
                          void *userdata)
{
    struct tlog_es_reader_write_data *data =
        (struct tlog_es_reader_write_data *)userdata;
    size_t len = size * nmemb;

    assert(ptr != NULL || len == 0);
    assert(data != NULL);

    if (len == 0) {
        return len;
    }

    /* A complete object was already parsed; any further data is trailing
     * garbage. Returning !len (== 0 here) signals an error to libcurl. */
    if (data->obj != NULL) {
        return !len;
    }

    data->obj = json_tokener_parse_ex(data->tok, ptr, (int)len);
    if (data->obj == NULL) {
        data->rc = json_tokener_get_error(data->tok);
        /* Only a hard parse error aborts; "continue" means the object is
         * simply incomplete and more chunks are expected. */
        if (data->rc != json_tokener_continue) {
            return !len;
        }
    }

    return len;
}
/* Lua binding: parse the string argument as JSON.
 * Pushes the converted value (or nil on failure), plus an error-description
 * string when parsing failed; returns the number of pushed values. */
static int json_parse(lua_State *L)
{
	size_t input_len;
	const char *input = luaL_checklstring(L, 1, &input_len);
	struct json_state state = { .tok = json_tokener_new() };

	if (!state.tok)
		return 0;

	state.obj = json_tokener_parse_ex(state.tok, input, input_len);
	state.err = json_tokener_get_error(state.tok);

	if (!state.obj) {
		lua_pushnil(L);
	} else {
		_json_to_lua(L, state.obj);
		json_object_put(state.obj);
	}

	/* The whole input was supplied, so a "continue" state really means
	 * the document was truncated — report it as unexpected EOF. */
	if (state.err == json_tokener_continue)
		state.err = json_tokener_error_parse_eof;
	if (state.err)
		lua_pushstring(L, json_tokener_error_desc(state.err));

	json_tokener_free(state.tok);

	return state.err ? 2 : 1;
}
void fromString(std::string str) { json_object *rootobject; json_tokener *tokener = json_tokener_new(); enum json_tokener_error err; do { rootobject = json_tokener_parse_ex(tokener, str.c_str(),str.length()); } while ((err = json_tokener_get_error(tokener)) == json_tokener_continue); if (err != json_tokener_success) { fprintf(stderr, "Error: %s\n", json_tokener_error_desc(err)); // Handle errors, as appropriate for your application. } if (tokener->char_offset < str.length()) // XXX shouldn't access internal fields { // Handle extra characters after parsed object as desired. // e.g. issue an error, parse another object from that point, etc... } //Good! json_object_object_foreach(rootobject, key, val) { T one(key); N two(std::string(json_object_get_string(val))); append(one,two); }
/**
 * Process a Keystone authentication response.
 * This parses the response and saves copies of the interesting service
 * endpoint URLs.
 *
 * libcurl write-callback signature: called with each received body chunk.
 *
 * @param ptr       Received chunk of the HTTP response body.
 * @param size      Size of one data element.
 * @param nmemb     Number of data elements.
 * @param userdata  keystone_context_t supplied via CURLOPT_WRITEDATA.
 *
 * @return len to accept the chunk, 0 to abort the transfer.
 */
static size_t
process_keystone_response(void *ptr, size_t size, size_t nmemb, void *userdata)
{
	keystone_context_t *context = (keystone_context_t *) userdata;
	const char *body = (const char *) ptr;
	size_t len = size * nmemb;
	struct json_object *jobj;
	enum json_tokener_error json_err;

	assert(context->pvt.json_tokeniser != NULL);

	jobj = json_tokener_parse_ex(context->pvt.json_tokeniser, body, len);
	json_err = json_tokener_get_error(context->pvt.json_tokeniser);
	if (json_tokener_success == json_err) {
		/* Full JSON document assembled across chunks; hand it off.
		 * NOTE(review): jobj appears to be owned by the tokeniser /
		 * consumer here — confirm process_keystone_json's ownership
		 * contract before adding a json_object_put. */
		enum keystone_error sc_err = process_keystone_json(context, jobj);
		if (sc_err != KSERR_SUCCESS) {
			return 0; /* Failed to process JSON. Inform libcurl no data 'handled' */
		}
	} else if (json_tokener_continue == json_err) {
		/* Complete JSON response not yet received; continue */
	} else {
		context->json_error("json_tokener_parse_ex", json_err);
		context->keystone_error("failed to parse response", KSERR_PARSE);
		return 0; /* Apparent JSON parsing problem. Inform libcurl no data 'handled' */
	}

	return len; /* Inform libcurl that all data were 'handled' */
}
int token_parse_json(struct access_token **tokenp, struct evbuffer *buf) { char cbuf[1024]; int removed; int ret; struct access_token *token; struct json_tokener *tokener; enum json_tokener_error jerr; struct json_object *obj; tokener = json_tokener_new(); if (tokener == NULL) { return ENOMEM; } do { removed = evbuffer_remove(buf, cbuf, sizeof(cbuf)); obj = json_tokener_parse_ex(tokener, cbuf, removed); jerr = json_tokener_get_error(tokener); verbose(FIREHOSE, "%s(): Passed %d bytes, result %p (%s), remaining %zd\n", __func__, removed, obj, json_tokener_error_desc(jerr), evbuffer_get_length(buf)); } while (obj == NULL && jerr == json_tokener_continue && evbuffer_get_length(buf) > 0); json_tokener_free(tokener); if (obj != NULL) { token = malloc(sizeof(*token)); if (token == NULL) { ret = ENOMEM; } else { memset(token, 0, sizeof(*token)); ret = build_token_into(token, obj); if (ret != 0) { token_free(token); } } } else { verbose(FIREHOSE, "%s(): json tokener reported: %s\n", __func__, json_tokener_error_desc(jerr)); } json_object_put(obj); if (ret == 0) { *tokenp = token; } return ret; }
/** Couchbase callback for get (read) operations
 *
 * Feeds the received value chunk into the cookie's JSON tokener and records
 * the resulting object/error state on the cookie for the caller to inspect.
 *
 * @param instance Couchbase connection instance.
 * @param cookie   Couchbase cookie for returning information from callbacks.
 * @param error    Couchbase error object.
 * @param resp     Couchbase get operation response object.
 */
void couchbase_get_callback(lcb_t instance, const void *cookie,
                            lcb_error_t error, const lcb_get_resp_t *resp)
{
	cookie_u cu;                         /* union of const and non const pointers */
	cu.cdata = cookie;                   /* set const union member to cookie passed from couchbase */
	cookie_t *c = (cookie_t *) cu.data;  /* set our cookie struct using non-const member */
	const char *bytes = resp->v.v0.bytes;     /* the payload of this chunk */
	lcb_size_t nbytes = resp->v.v0.nbytes;    /* length of this data chunk */

	/* check error */
	switch (error) {
	case LCB_SUCCESS:
		/* check for valid bytes
		 * NOTE(review): nbytes > 1 also skips one-byte payloads —
		 * presumably intentional (no one-byte JSON value of interest),
		 * but worth confirming. */
		if (bytes && nbytes > 1) {
			/* debug */
			DEBUG("rlm_couchbase: (get_callback) got %zu bytes", nbytes);
			/* parse string to json object; stays NULL while the
			 * tokener needs more chunks (json_tokener_continue) */
			c->jobj = json_tokener_parse_ex(c->jtok, bytes, nbytes);
			/* switch on tokener error */
			switch ((c->jerr = json_tokener_get_error(c->jtok))) {
			case json_tokener_continue:
				/* check object - should be null */
				if (c->jobj != NULL) {
					ERROR("rlm_couchbase: (get_callback) object not null on continue!");
				}
				break;
			case json_tokener_success:
				/* do nothing */
				break;
			default:
				/* log error */
				ERROR("rlm_couchbase: (get_callback) json parsing error: %s",
				      json_tokener_error_desc(c->jerr));
				break;
			}
		}
		break;

	case LCB_KEY_ENOENT:
		/* ignored */
		DEBUG("rlm_couchbase: (get_callback) key does not exist");
		break;

	default:
		/* log error */
		ERROR("rlm_couchbase: (get_callback) %s (0x%x)",
		      lcb_strerror(instance, error), error);
		break;
	}
}
/* libcurl write callback: stream each received chunk into the shared JSON
 * tokener held in the JsonContext. Returns the chunk size to keep the
 * transfer going, or 0 to abort on a hard parse error. */
static size_t CPLJSONWriteFunction(void *pBuffer, size_t nSize, size_t nMemb,
                                   void *pUserData)
{
    const size_t nBytes = nSize * nMemb;
    JsonContextL psContext = static_cast<JsonContextL>(pUserData);

    psContext->pObject =
        json_tokener_parse_ex(psContext->pTokener,
                              static_cast<const char *>(pBuffer),
                              static_cast<int>(nBytes));
    psContext->nDataLen = static_cast<int>(nBytes);

    const enum json_tokener_error eErr =
        json_tokener_get_error(psContext->pTokener);
    // "continue" = object incomplete, keep feeding; "success" = done.
    if (eErr == json_tokener_continue || eErr == json_tokener_success)
        return nBytes;
    return 0; /* error: interrupt the transfer */
}
/**
 * Read the memory reader text as a JSON object line, don't consume
 * terminating newline. The line must not start with whitespace.
 *
 * @param mem_json_reader   The memory reader to parse the object for.
 * @param pobject           Location for the parsed object pointer.
 *
 * @return Global return code.
 */
static tlog_grc
tlog_mem_json_reader_read_json(struct tlog_mem_json_reader *mem_json_reader,
                               struct json_object **pobject)
{
    const char *p;
    struct json_object *object;
    enum json_tokener_error jerr;

    assert(tlog_mem_json_reader_is_valid(
                    (struct tlog_json_reader *)mem_json_reader));
    assert(pobject != NULL);

    /* Fresh parse state for each line. */
    json_tokener_reset(mem_json_reader->tok);

    /* Look for a terminating newline */
    for (p = mem_json_reader->pos;
         p < mem_json_reader->end && *p != '\n';
         p++);

    /* If the line is empty */
    if (p == mem_json_reader->pos) {
        /* Report EOF */
        *pobject = NULL;
        return TLOG_RC_OK;
    }

    /* Parse the line */
    object = json_tokener_parse_ex(mem_json_reader->tok,
                                   mem_json_reader->pos,
                                   p - mem_json_reader->pos);
    /* Advance past the consumed line either way; the newline itself is
     * left for the caller to skip. */
    mem_json_reader->pos = p;
    if (object == NULL) {
        jerr = json_tokener_get_error(mem_json_reader->tok);
        /* "continue" here means the line ended mid-object: incomplete. */
        return (jerr == json_tokener_continue)
                    ? TLOG_RC_MEM_JSON_READER_INCOMPLETE_LINE
                    : TLOG_GRC_FROM(json, jerr);
    }

    /* Return the parsed object */
    *pobject = object;
    return TLOG_RC_OK;
}
/* NOTE(review): the matching #if HAVE_CURL and the non-stub signature are
 * above this view; the commented-out parameter names below belong to the
 * no-curl stub signature, while the body (guarded by #ifdef HAVE_CURL)
 * uses the real parameter names from the other branch. */
bool CPLJSONDocument::LoadUrl(const std::string & /*osUrl*/,
                              char ** /*papszOptions*/,
                              GDALProgressFunc /*pfnProgress*/,
                              void * /*pProgressArg*/)
#endif // HAVE_CURL
{
#ifdef HAVE_CURL
    // Maximum JSON nesting depth, overridable via the JSON_DEPTH option.
    int nDepth = atoi( CSLFetchNameValueDef( papszOptions, "JSON_DEPTH", "10") );
    JsonContext ctx = { nullptr, json_tokener_new_ex(nDepth), 0 };

    CPLHTTPFetchWriteFunc pWriteFunc = CPLJSONWriteFunction;
    CPLHTTPResult *psResult = CPLHTTPFetchEx( osUrl.c_str(), papszOptions,
                                              pfnProgress, pProgressArg,
                                              pWriteFunc, &ctx );
    bool bResult = true;
    if( psResult->nStatus != 0 /*CURLE_OK*/ )
    {
        bResult = false;
    }
    CPLHTTPDestroyResult( psResult );

    enum json_tokener_error jerr;
    if ((jerr = json_tokener_get_error(ctx.pTokener)) != json_tokener_success)
    {
        CPLError(CE_Failure, CPLE_AppDefined, "JSON error: %s\n",
                 json_tokener_error_desc(jerr));
        bResult = false;
    }
    else
    {
        // Replace any previously loaded document with the fetched one.
        if( m_poRootJsonObject )
            json_object_put( TO_JSONOBJ(m_poRootJsonObject) );

        m_poRootJsonObject = ctx.pObject;
    }
    json_tokener_free(ctx.pTokener);

    return bResult;
#else
    return false;
#endif
}
/** Couchbase callback for http (view) operations
 *
 * Streams each received body chunk into the cookie's JSON tokener; the
 * accumulated object and tokener error state are stored on the cookie.
 *
 * @param request  Couchbase http request object.
 * @param instance Couchbase connection instance.
 * @param cookie   Couchbase cookie for returning information from callbacks.
 * @param error    Couchbase error object.
 * @param resp     Couchbase http response object.
 */
void couchbase_http_data_callback(lcb_http_request_t request, lcb_t instance,
                                  const void *cookie, lcb_error_t error,
                                  const lcb_http_resp_t *resp)
{
	cookie_u cu;                         /* union of const and non const pointers */
	cu.cdata = cookie;                   /* set const union member to cookie passed from couchbase */
	cookie_t *c = (cookie_t *) cu.data;  /* set our cookie struct using non-const member */
	const char *bytes = resp->v.v0.bytes;     /* the payload of this chunk */
	lcb_size_t nbytes = resp->v.v0.nbytes;    /* length of this data chunk */

	/* check error */
	switch (error) {
	case LCB_SUCCESS:
		/* check for valid bytes */
		if (bytes && nbytes > 1) {
			/* debug */
			DEBUG("rlm_couchbase: (http_data_callback) got %zu bytes", nbytes);
			/* build json object; stays NULL until the whole body
			 * has been fed in (json_tokener_continue in between) */
			c->jobj = json_tokener_parse_ex(c->jtok, bytes, nbytes);
			/* switch on current error status */
			switch ((c->jerr = json_tokener_get_error(c->jtok))) {
			case json_tokener_continue:
				/* do nothing */
				break;
			case json_tokener_success:
				/* do nothing */
				break;
			default:
				/* log error */
				ERROR("rlm_couchbase: (http_data_callback) JSON Tokener error: %s",
				      json_tokener_error_desc(c->jerr));
				break;
			}
		}
		break;

	default:
		/* log error */
		ERROR("rlm_couchbase: (http_data_callback) %s (0x%x)",
		      lcb_strerror(instance, error), error);
		break;
	}
	/* silent compiler */
	(void)request;
}
/* Lua binding: feed one chunk of JSON text into a persistent parser
 * userdata. Pushes true when a value completed, false when more input is
 * needed, or nil + error message on a hard parse error. */
static int json_parse_chunk(lua_State *L)
{
	size_t chunk_len;
	struct json_state *state = luaL_checkudata(L, 1, LUCI_JSONC_PARSER);
	const char *chunk = luaL_checklstring(L, 2, &chunk_len);

	state->obj = json_tokener_parse_ex(state->tok, chunk, chunk_len);
	state->err = json_tokener_get_error(state->tok);

	switch (state->err)
	{
	case json_tokener_success:
		lua_pushboolean(L, true);
		return 1;

	case json_tokener_continue:
		lua_pushboolean(L, false);
		return 1;

	default:
		lua_pushnil(L);
		lua_pushstring(L, json_tokener_error_desc(state->err));
		return 2;
	}
}
void DatabaseSink::parseConfig() { json_object *rootobject; json_tokener *tokener = json_tokener_new(); enum json_tokener_error err; do { rootobject = json_tokener_parse_ex(tokener, configuration["properties"].c_str(),configuration["properties"].size()); } while ((err = json_tokener_get_error(tokener)) == json_tokener_continue); if (err != json_tokener_success) { fprintf(stderr, "Error: %s\n", json_tokener_error_desc(err)); } if (tokener->char_offset < configuration["properties"].size()) // XXX shouldn't access internal fields { //Should handle the extra data here sometime... } json_object *propobject = json_object_object_get(rootobject,"properties"); g_assert(json_object_get_type(propobject) == json_type_array); array_list *proplist = json_object_get_array(propobject); for(int i=0; i < array_list_length(proplist); i++) { json_object *idxobj = (json_object*)array_list_get_idx(proplist,i); std::string prop = json_object_get_string(idxobj); propertiesToSubscribeTo.push_back(prop); DebugOut()<<"DatabaseSink logging: "<<prop<<endl; } json_object_put(propobject); json_object_put(rootobject); }
/**
 * Read the next JSON log entry from the systemd journal.
 *
 * Advances the journal cursor, extracts the MESSAGE= field of the entry,
 * and parses it as a JSON object. Stops (returning a NULL object) when the
 * entry's realtime timestamp exceeds the reader's "until" limit or when the
 * journal is exhausted.
 *
 * @param reader   The journal JSON reader.
 * @param pobject  Location for the parsed object pointer; set to NULL on
 *                 end-of-input/time-limit.
 *
 * @return Global return code.
 */
tlog_grc
tlog_journal_json_reader_read(struct tlog_json_reader *reader,
                              struct json_object **pobject)
{
    struct tlog_journal_json_reader *journal_json_reader =
                                (struct tlog_journal_json_reader*)reader;
    tlog_grc grc;
    int sd_rc;
    struct json_object *object = NULL;

    /* If we ran out of time limit */
    if (journal_json_reader->last > journal_json_reader->until) {
        goto exit;
    }

    /* Advance to the next entry */
    sd_rc = sd_journal_next(journal_json_reader->journal);
    /* If failed */
    if (sd_rc < 0) {
        grc = TLOG_GRC_FROM(systemd, sd_rc);
        goto cleanup;
    /* If got an entry */
    } else if (sd_rc > 0) {
        const char *field_ptr;
        size_t field_len;
        const char *message_ptr;
        size_t message_len;

        /* Advance entry counter */
        journal_json_reader->entry++;

        /* Get the entry realtime timestamp */
        sd_rc = sd_journal_get_realtime_usec(journal_json_reader->journal,
                                             &journal_json_reader->last);
        if (sd_rc < 0) {
            grc = TLOG_GRC_FROM(systemd, sd_rc);
            goto cleanup;
        }
        if (journal_json_reader->last > journal_json_reader->until) {
            goto exit;
        }

        /* Get the entry message field data */
        sd_rc = sd_journal_get_data(journal_json_reader->journal, "MESSAGE",
                                    (const void **)&field_ptr, &field_len);
        if (sd_rc < 0) {
            grc = TLOG_GRC_FROM(systemd, sd_rc);
            goto cleanup;
        }

        /* Extract the message: everything after the '=' in "MESSAGE=..." */
        message_ptr = (const char *)memchr(field_ptr, '=', field_len);
        if (message_ptr == NULL) {
            grc = TLOG_RC_FAILURE;
            goto cleanup;
        }
        message_ptr++;
        message_len = field_len - (message_ptr - field_ptr);

        /* Parse the message */
        object = json_tokener_parse_ex(journal_json_reader->tok,
                                       message_ptr, message_len);
        if (object == NULL) {
            grc = TLOG_GRC_FROM(
                    json,
                    json_tokener_get_error(journal_json_reader->tok));
            goto cleanup;
        }
    }

exit:
    /* Hand ownership of the object (possibly NULL) to the caller; clearing
     * the local pointer makes the cleanup json_object_put a no-op. */
    *pobject = object;
    object = NULL;
    grc = TLOG_RC_OK;

cleanup:
    json_object_put(object);
    return grc;
}
/* NOTE(review): the opening "#ifdef _WIN32" matching this #else/#endif is
 * above this view — the two declarations select the platform-specific
 * thread-entry signature. */
DWORD WINAPI CollabSocket::recvHandler(void *_sock) {
#else
/* Receive loop thread: reads from the socket, accumulates the bytes in a
 * static buffer, and extracts as many complete JSON objects as possible,
 * queueing each via the dispatcher. Exits on EOF or a hard error. */
void *CollabSocket::recvHandler(void *_sock) {
#endif
   /* accumulated, not-yet-parsed input; static so partial JSON survives
    * across recv() calls (assumes a single receive thread) */
   static qstring b;
   unsigned char buf[2048];  //read a large chunk, we'll be notified if there is more
   CollabSocket *sock = (CollabSocket*)_sock;
   json_tokener *tok = json_tokener_new();
   while (sock->isConnected()) {
      int len = sock->recv(buf, sizeof(buf) - 1);
      if (len == 0) {
         //end of file
         break;
      }
      if (len < 0) {
#ifdef _WIN32
         //timeouts are okay
         if (WSAGetLastError() == WSAETIMEDOUT) {
            continue;
         }
#else
         //timeouts are okay
         if (errno == EAGAIN || errno == EWOULDBLOCK) {
            continue;
         }
#endif
         // assumption is that socket is borked and next send will fail also
         // maybe should close socket here at a minimum.
         // in any case thread is exiting
         // reconnecting is a better strategy
         break;
      }
      if (sock->_disp) {
         json_object *jobj = NULL;
         enum json_tokener_error jerr;
         buf[len] = 0;
         b.append((char*)buf, len);   //append new data into static buffer
         /* drain every complete JSON object currently in the buffer */
         while (1) {
            jobj = json_tokener_parse_ex(tok, b.c_str(), (int)b.length());
            jerr = json_tokener_get_error(tok);
            if (jerr == json_tokener_continue) {
               //json object is syntactically correct, but incomplete
//               msg("json_tokener_continue for %s\n", b.c_str());
               break;
            }
            else if (jerr != json_tokener_success) {
               //need to reconnect socket and in the meantime start caching event locally
//               msg("jerr != json_tokener_success for %s\n", b.c_str());
               //need to break out of both loops
               goto end_loop;
            }
            else if (jobj != NULL) {
               //we extracted a json object from the front of the string
               //queue it and trim the string
//               msg("json_tokener_success for %s\n", b.c_str());
               /* NOTE(review): reads json-c's internal char_offset field */
               if (tok->char_offset < b.length()) {
                  //shift any remaining portions of the buffer to the front
                  b.remove(0, tok->char_offset);
               }
               else {
                  b.clear();
               }
               sock->drt->queueObject(jobj);
            }
         }
         /* reset parse state before the next batch of socket data */
         json_tokener_reset(tok);
      }
   }
end_loop:
   json_tokener_free(tok);
   sock->cleanup();
   return 0;
}

/* Send `s` over the active comm channel; returns 0 when not connected. */
int send_all(const qstring &s) {
   if (comm) {
      return comm->sendAll(s);
   }
   return 0;
}

/* Send `s` over the active comm channel, or append it to the offline change
 * cache when disconnected; returns the number of bytes handled. */
int send_msg(const qstring &s) {
   if (comm) {
      return comm->sendAll(s);
   }
   else {
      if (changeCache != NULL) {
//         msg("writing to change cache\n");
         *changeCache += s;
         return (int)s.length();
      }
   }
   return 0;
}
/**
 * Handle a raw websocket message: parse it as a JSON request and dispatch
 * "get"/"set"/"getSupportedEventTypes" method calls.
 *
 * @param socket  websocket the message arrived on (replies go back here)
 * @param data    message payload (JSON text)
 * @param len     payload length in bytes
 */
void CANGenPlugin::dataReceived(libwebsocket* socket, const char* data, size_t len)
{
	if(!data || len == 0)
		return;

	//TODO: refactor ? copied from websocketsink
	/* unique_ptrs with json-c deleters so all refs are released on return */
	std::unique_ptr<json_object, decltype(&json_object_put)> rootobject(nullptr, &json_object_put);
	std::unique_ptr<json_tokener, decltype(&json_tokener_free)> tokener(json_tokener_new(), &json_tokener_free);
	enum json_tokener_error err;
	do
	{
		std::unique_ptr<json_object, decltype(&json_object_put)> tmpobject(json_tokener_parse_ex(tokener.get(), data, len), &json_object_put);
		rootobject.swap(tmpobject);
	} while ((err = json_tokener_get_error(tokener.get())) == json_tokener_continue);
	if (err != json_tokener_success)
	{
		LOG_ERROR("Error: " << json_tokener_error_desc(err) << std::endl);
		return;
	}
	if(!rootobject)
	{
		LOG_ERROR("Failed to parse json: " << data << std::endl);
		return;
	}
	if (tokener->char_offset < len) // XXX shouldn't access internal fields
	{
		// Handle extra characters after parsed object as desired.
		// e.g. issue an error, parse another object from that point, etc...
	}
	// Success, use jobj here.

	/* top-level request envelope: type, name, transactionid */
	json_object *typeobject = json_object_object_get(rootobject.get(),"type");
	json_object *nameobject = json_object_object_get(rootobject.get(),"name");
	json_object *transidobject = json_object_object_get(rootobject.get(),"transactionid");

	if(!typeobject || !nameobject || !transidobject)
	{
		DebugOut(DebugOut::Warning)<<"Malformed json. aborting"<<endl;
		return;
	}

	string type = string(json_object_get_string(typeobject));
	string name = string(json_object_get_string(nameobject));
	string id;
	/* transactionid may arrive as a string or a number; normalize to string */
	if (json_object_get_type(transidobject) == json_type_string)
	{
		id = string(json_object_get_string(transidobject));
	}
	else
	{
		stringstream strstr;
		strstr << json_object_get_int(transidobject);
		id = strstr.str();
	}

	if (type == "method")
	{
		vector<string> propertyNames;
		list< std::tuple<string, string, string, Zone::Type, string> > propertyData;
		/* "data" is either an array of property names / property objects,
		 * or a single property-name string */
		json_object *dataobject = json_object_object_get(rootobject.get(),"data");
		if (json_object_get_type(dataobject) == json_type_array)
		{
			array_list *arraylist = json_object_get_array(dataobject);
			for (int i=0;i<array_list_length(arraylist);i++)
			{
				json_object *arrayobject = (json_object*)array_list_get_idx(arraylist,i);
				if (json_object_get_type(arrayobject) == json_type_object)
				{
					json_object *interfaceobject = json_object_object_get(arrayobject,"interface");
					json_object *propobject = json_object_object_get(arrayobject,"property");
					json_object *valueobject = json_object_object_get(arrayobject,"value");
					json_object *zoneobject = json_object_object_get(arrayobject,"zone");
					json_object *sourceobject = json_object_object_get(arrayobject,"source");
					/* defaults: interface "vcan0", everything else empty */
					string interfacestr = string(interfaceobject ? json_object_get_string(interfaceobject) : "vcan0");
					string keystr = string(propobject ? json_object_get_string(propobject) : "");
					string valuestr = string(valueobject ? json_object_get_string(valueobject): "");
					string sourcestr = string(sourceobject ? json_object_get_string(sourceobject): "");
					Zone::Type z(Zone::None);
					if(zoneobject)
					{
						/* zone arrives as text; a bad value silently keeps Zone::None */
						try
						{
							z = static_cast<Zone::Type>(boost::lexical_cast<int,std::string>(json_object_get_string(zoneobject)));
						}
						catch (...)
						{
						}
					}
					propertyData.push_back(make_tuple(interfacestr, keystr, valuestr, z, sourcestr));
				}
				else if (json_object_get_type(arrayobject) == json_type_string)
				{
					string propertyName = string(json_object_get_string(arrayobject));
					propertyNames.push_back(propertyName);
				}
			}
			//array_list_free(arraylist);
		}
		else
		{
			string path = json_object_get_string(dataobject);
			if (path != "")
			{
				propertyNames.push_back(path);
			}
		}
		if (type == "method")
		{
			if (name == "get")
			{
				if (!propertyNames.empty())
				{
					//GetProperty is going to be a singleshot sink.
					getValue(socket,propertyNames.front(),Zone::None,id);
				}
				else if (!propertyData.empty())
				{
					//GetProperty is going to be a singleshot sink.
					auto prop = propertyData.front();
					getValue(socket,std::get<1>(prop),std::get<3>(prop),id);
				}
				else
				{
					LOG_WARNING(" \"get\" method called with no data! Transaction ID:" << id);
				}
			}
			else if (name == "set")
			{
				if (!propertyNames.empty())
				{
					//Should not happen
				}
				else if (!propertyData.empty())
				{
					auto prop = propertyData.begin();
					for (auto prop = propertyData.begin(); prop != propertyData.end(); ++prop)
					{
						LOG_MESSAGE("websocketsinkmanager setting " << std::get<1>(*prop) << " to " << std::get<2>(*prop) << " in zone " << std::get<3>(*prop));
						setValue(socket,std::get<1>(*prop),std::get<2>(*prop),std::get<3>(*prop),std::get<0>(*prop), id);
					}
				}
			}
			else if (name == "getSupportedEventTypes")
			{
				//If data.front() dosen't contain a property name, return a list of properties supported.
				//if it does, then return the event types that particular property supports.
				string typessupported = "";
				if (propertyNames.empty())
				{
					//Send what properties we support
					PropertyList foo(routingEngine->supported());
					PropertyList::const_iterator i=foo.cbegin();
					while (i != foo.cend())
					{
						if(i==foo.cbegin())
							typessupported.append("\"").append((*i)).append("\"");
						else
							typessupported.append(",\"").append((*i)).append("\"");
						++i;
					}
				}
				else
				{
					//Send what events a particular property supports
					PropertyList foo(routingEngine->supported());
					if (ListPlusPlus<VehicleProperty::Property>(&foo).contains(propertyNames.front()))
					{
						//sinkManager->addSingleShotSink(wsi,data.front(),id);
						typessupported = "\"get\",\"getSupportedEventTypes\"";
					}
				}
				stringstream s;
				string s2;
				s << "{\"type\":\"methodReply\",\"name\":\"getSupportedEventTypes\",\"data\":[" << typessupported << "],\"transactionid\":\"" << id << "\"}";
				string replystr = s.str();
				LOG_INFO(" JSON Reply: " << replystr);
				WebSockets::Write(socket, replystr);
			}
			else
			{
				DebugOut(0)<<"Unknown method called."<<endl;
			}
		}
	}
}
/**
 * Read the fd reader text as (a part of) a JSON object line, don't consume
 * terminating newline.
 *
 * @param fd_json_reader    The fd reader to parse the object for.
 * @param pobject           Location for the parsed object pointer.
 *
 * @return Global return code.
 */
static tlog_grc
tlog_fd_json_reader_read_json(struct tlog_fd_json_reader *fd_json_reader,
                              struct json_object **pobject)
{
    tlog_grc grc;
    char *p;
    struct json_object *object;
    enum json_tokener_error jerr;
    /* True once any non-empty buffer content was seen — distinguishes
     * "truncated line at EOF" from a clean EOF. */
    bool got_text = false;

    assert(tlog_fd_json_reader_is_valid(
                    (struct tlog_json_reader *)fd_json_reader));
    assert(pobject != NULL);

    json_tokener_reset(fd_json_reader->tok);

    /* Until EOF */
    do {
        /* If the buffer is not empty */
        if (fd_json_reader->pos < fd_json_reader->end) {
            /* We got something to parse */
            got_text = true;

            /* Look for a terminating newline */
            for (p = fd_json_reader->pos;
                 p < fd_json_reader->end && *p != '\n';
                 p++);

            /* Parse the next piece */
            object = json_tokener_parse_ex(fd_json_reader->tok,
                                           fd_json_reader->pos,
                                           p - fd_json_reader->pos);
            fd_json_reader->pos = p;

            /* If we finished parsing an object */
            if (object != NULL) {
                *pobject = object;
                return TLOG_RC_OK;
            } else {
                jerr = json_tokener_get_error(fd_json_reader->tok);
                /* If object is not finished */
                if (jerr == json_tokener_continue) {
                    /* If we encountered an object-terminating newline */
                    if (p < fd_json_reader->end) {
                        return TLOG_RC_FD_JSON_READER_INCOMPLETE_LINE;
                    }
                    /* otherwise: buffer exhausted mid-object; refill below */
                } else {
                    return TLOG_GRC_FROM(json, jerr);
                }
            }
        }

        grc = tlog_fd_json_reader_refill_buf(fd_json_reader);
        if (grc != TLOG_RC_OK) {
            return grc;
        }
    } while (fd_json_reader->end > fd_json_reader->buf);

    if (got_text) {
        /* Input ended before the object (line) completed */
        return TLOG_RC_FD_JSON_READER_INCOMPLETE_LINE;
    } else {
        /* Clean EOF: no object, no error */
        *pobject = NULL;
        return TLOG_RC_OK;
    }
}
/**
 * Load json document from file using small chunks of data.
 * @param osPath      Path to json document file.
 * @param nChunkSize  Chunk size.
 * @param pfnProgress a function to report progress of the json data loading.
 * @param pProgressArg application data passed into progress function.
 * @return true on success. If error occurred it can be received using
 *         CPLGetLastErrorMsg method.
 *
 * @since GDAL 2.3
 */
bool CPLJSONDocument::LoadChunks(const std::string &osPath, size_t nChunkSize,
                                 GDALProgressFunc pfnProgress,
                                 void *pProgressArg)
{
    VSIStatBufL sStatBuf;
    if(VSIStatL( osPath.c_str(), &sStatBuf ) != 0)
    {
        CPLError(CE_Failure, CPLE_FileIO, "Cannot open %s", osPath.c_str());
        return false;
    }

    VSILFILE *fp = VSIFOpenL( osPath.c_str(), "rb" );
    if( fp == nullptr )
    {
        CPLError(CE_Failure, CPLE_FileIO, "Cannot open %s", osPath.c_str());
        return false;
    }

    void *pBuffer = CPLMalloc( nChunkSize );
    json_tokener *tok = json_tokener_new();
    bool bSuccess = true;
    GUInt32 nFileSize = static_cast<GUInt32>(sStatBuf.st_size);
    double dfTotalRead = 0.0;

    while( true )
    {
        size_t nRead = VSIFReadL( pBuffer, 1, nChunkSize, fp );
        dfTotalRead += nRead;

        // The tokener returns NULL (with json_tokener_continue) until the
        // last chunk completes the document, so releasing the previous
        // root only matters on the final, successful iteration.
        if( m_poRootJsonObject )
            json_object_put( TO_JSONOBJ(m_poRootJsonObject) );

        m_poRootJsonObject = json_tokener_parse_ex(tok,
                                                   static_cast<const char*>(pBuffer),
                                                   static_cast<int>(nRead));

        enum json_tokener_error jerr = json_tokener_get_error(tok);
        if(jerr != json_tokener_continue && jerr != json_tokener_success)
        {
            CPLError( CE_Failure, CPLE_AppDefined, "JSON error: %s",
                      json_tokener_error_desc(jerr) );
            bSuccess = false;
            break;
        }

        // Short read means EOF.
        // NOTE(review): a file that ends while jerr == json_tokener_continue
        // (truncated JSON) still reports success here — confirm intended.
        if( nRead < nChunkSize )
        {
            break;
        }

        if( nullptr != pfnProgress )
        {
            pfnProgress(dfTotalRead / nFileSize, "Loading ...", pProgressArg);
        }
    }

    json_tokener_free(tok);
    CPLFree(pBuffer);
    VSIFCloseL(fp);

    if( nullptr != pfnProgress )
    {
        pfnProgress(1.0, "Loading ...", pProgressArg);
    }

    return bSuccess;
}
/* Exercise json_tokener_parse_ex against the incremental_steps table
 * (defined elsewhere in this file): each step feeds a string fragment and
 * checks the returned object, error code, and consumed char offset. */
static void test_incremental_parse()
{
	json_object *new_obj;
	enum json_tokener_error jerr;
	json_tokener *tok;
	const char *string_to_parse;
	int ii;
	int num_ok, num_error;

	num_ok = 0;
	num_error = 0;

	printf("Starting incremental tests.\n");
	printf("Note: quotes and backslashes seen in the output here are literal values passed\n");
	printf("     to the parse functions.  e.g. this is 4 characters: \"\\f\"\n");

	/* Sanity check: a truncated document must fail the one-shot parser. */
	string_to_parse = "{ \"foo"; /* } */
	printf("json_tokener_parse(%s) ... ", string_to_parse);
	new_obj = json_tokener_parse(string_to_parse);
	if (new_obj == NULL)
		printf("got error as expected\n");

	/* test incremental parsing in various forms */
	tok = json_tokener_new();
	for (ii = 0; incremental_steps[ii].string_to_parse != NULL; ii++)
	{
		int this_step_ok = 0;
		struct incremental_step *step = &incremental_steps[ii];
		int length = step->length;
		int expected_char_offset = step->char_offset;

		/* reset_tokener bit 2 selects strict mode for this step */
		if (step->reset_tokener & 2)
			json_tokener_set_flags(tok, JSON_TOKENER_STRICT);
		else
			json_tokener_set_flags(tok, 0);

		/* -1 in the table means "use the full string" */
		if (length == -1)
			length = strlen(step->string_to_parse);
		if (expected_char_offset == -1)
			expected_char_offset = length;

		printf("json_tokener_parse_ex(tok, %-12s, %3d) ... ",
		       step->string_to_parse, length);
		new_obj = json_tokener_parse_ex(tok, step->string_to_parse, length);

		jerr = json_tokener_get_error(tok);
		if (step->expected_error != json_tokener_success)
		{
			if (new_obj != NULL)
				printf("ERROR: invalid object returned: %s\n",
				       json_object_to_json_string(new_obj));
			else if (jerr != step->expected_error)
				printf("ERROR: got wrong error: %s\n",
				       json_tokener_error_desc(jerr));
			else if (tok->char_offset != expected_char_offset)
				printf("ERROR: wrong char_offset %d != expected %d\n",
				       tok->char_offset, expected_char_offset);
			else
			{
				printf("OK: got correct error: %s\n",
				       json_tokener_error_desc(jerr));
				this_step_ok = 1;
			}
		}
		else
		{
			if (new_obj == NULL)
				printf("ERROR: expected valid object, instead: %s\n",
				       json_tokener_error_desc(jerr));
			else if (tok->char_offset != expected_char_offset)
				printf("ERROR: wrong char_offset %d != expected %d\n",
				       tok->char_offset, expected_char_offset);
			else
			{
				printf("OK: got object of type [%s]: %s\n",
				       json_type_to_name(json_object_get_type(new_obj)),
				       json_object_to_json_string(new_obj));
				this_step_ok = 1;
			}
		}

		if (new_obj)
			json_object_put(new_obj);

		/* reset_tokener bit 1 requests a fresh parse state for next step */
		if (step->reset_tokener & 1)
			json_tokener_reset(tok);

		if (this_step_ok)
			num_ok++;
		else
			num_error++;
	}

	json_tokener_free(tok);

	printf("End Incremental Tests OK=%d ERROR=%d\n", num_ok, num_error);

	return;
}
/*
 * Buffer upstream request data and incrementally feed it to the json-c
 * tokener until a complete JSON document has been parsed.
 *
 * Returns 1 when buffering progressed (or finished), 0 when the upstream
 * write VIO has already been shut down.
 *
 * Fix: the pointer returned by TSIOBufferBlockReadStart() is NOT a
 * NUL-terminated C string — it is a raw view of at most `toread` bytes.
 * The previous code called strlen(input), which reads past the end of the
 * block (undefined behavior) and can over- or under-count the bytes handed
 * to json_tokener_parse_ex().  We now use the `toread` byte count that
 * TSIOBufferBlockReadStart() itself reports.
 */
static int
handle_buffering(TSCont contp, JCrusherData *data)
{
  TSVIO upstream_vio;
  TSIOBuffer upstream_buffer;
  int64_t toread;
  int64_t avail;

  TSDebug("jcrusher", "Start of handle_buffering()");

  /* Get the write VIO for the write operation that was performed on
     ourself. This VIO contains the buffer that we are to read from as well
     as the continuation we are to call when the buffer is empty. */
  upstream_vio = TSVConnWriteVIOGet(contp);

  /* Create the output buffer and its associated reader (lazily). */
  if (!data->downstream_buffer) {
    data->downstream_buffer = TSIOBufferCreate();
    TSAssert(data->downstream_buffer);
    data->downstream_reader = TSIOBufferReaderAlloc(data->downstream_buffer);
    TSAssert(data->downstream_reader);
  }

  /* A NULL buffer indicates the write operation has been shut down and the
     upstream continuation does not want any more WRITE_READY/WRITE_COMPLETE
     events. For this buffered transformation that means we're done. */
  upstream_buffer = TSVIOBufferGet(upstream_vio);
  if (NULL == upstream_buffer) {
    data->state = STATE_OUTPUT_DATA;
    TSDebug("jcrusher", "handle_buffering - upstream_buffer is NULL");
    return 0;
  }

  /* How much data is left to read on this VIO. */
  toread = TSVIONTodoGet(upstream_vio);
  TSDebug("jcrusher", "handle_buffering - toread is %" PRId64, toread);
  if (toread > 0) {
    /* Truncate by what is actually sitting in the read buffer right now. */
    avail = TSIOBufferReaderAvail(TSVIOReaderGet(upstream_vio));
    if (toread > avail) {
      toread = avail;
    }
    TSDebug("jcrusher", "handle_buffering - toread is %" PRId64, toread);
    TSDebug("jcrusher", "handle_buffering - avail is %" PRId64, avail);

    TSIOBufferReader upstream_reader = TSVIOReaderGet(upstream_vio);
    TSIOBufferBlock upstream_blk = TSIOBufferReaderStart(upstream_reader);
    /* NOTE(review): this reads only the first contiguous block; `toread` is
       updated to the number of bytes actually visible through `input`. */
    const char *input = TSIOBufferBlockReadStart(upstream_blk, upstream_reader, &toread);
    TSDebug("jcrusher", "handle_buffering - just read [%d] bytes from buffer", (int)toread);

    TSDebug("jcrusher", "handle_buffering - parse json input");
    /* Feed exactly the bytes we were given — `input` is not NUL-terminated,
       so strlen() must not be used here. */
    data->json_obj = json_tokener_parse_ex(data->json_tok, input, (int)toread);
    if (json_tokener_success == (data->json_err = json_tokener_get_error(data->json_tok))) {
      TSDebug("jcrusher", "handle_buffering - got json_tokener_success");
      data->state = STATE_OUTPUT_DATA;

      /* Call back the write VIO continuation to let it know that we have
         completed the write operation. */
      TSContCall(TSVIOContGet(upstream_vio), TS_EVENT_VCONN_WRITE_COMPLETE, upstream_vio);
      return 1;
    }
    TSDebug("jcrusher", "handle_buffering - got json_tokener_continue");

    /* Tell the read buffer that we have consumed the data and are no longer
       interested in it. */
    TSIOBufferReaderConsume(TSVIOReaderGet(upstream_vio), toread);

    /* Modify the upstream VIO to reflect how much data we've completed. */
    TSVIONDoneSet(upstream_vio, TSVIONDoneGet(upstream_vio) + toread);

    /* Call back the upstream VIO continuation to let it know that we are
       ready for more data. */
    TSContCall(TSVIOContGet(upstream_vio), TS_EVENT_VCONN_WRITE_READY, upstream_vio);
  } else {
    TSDebug("jcrusher", "handle_buffering - seems we read all");
    data->state = STATE_OUTPUT_DATA;

    /* Call back the write VIO continuation to let it know that we have
       completed the write operation. */
    TSContCall(TSVIOContGet(upstream_vio), TS_EVENT_VCONN_WRITE_COMPLETE, upstream_vio);
  }
  TSDebug("jcrusher", "handle_buffering - End");
  return 1;
}
static bool rpc_plugin_lookup_plugin(struct ubus_context *ctx, struct ubus_object *obj, char *strptr) { struct rpc_plugin_lookup_context c = { .id = obj->id, .name = strptr }; if (ubus_lookup(ctx, NULL, rpc_plugin_lookup_plugin_cb, &c)) return false; return c.found; } struct call_context { char path[PATH_MAX]; const char *argv[4]; char *method; char *input; json_tokener *tok; json_object *obj; bool input_done; bool output_done; }; static int rpc_plugin_call_stdin_cb(struct ustream *s, void *priv) { struct call_context *c = priv; if (!c->input_done) { ustream_write(s, c->input, strlen(c->input), false); c->input_done = true; } return 0; } static int rpc_plugin_call_stdout_cb(struct blob_buf *blob, char *buf, int len, void *priv) { struct call_context *c = priv; if (!c->output_done) { c->obj = json_tokener_parse_ex(c->tok, buf, len); if (json_tokener_get_error(c->tok) != json_tokener_continue) c->output_done = true; } return len; } static int rpc_plugin_call_stderr_cb(struct blob_buf *blob, char *buf, int len, void *priv) { return len; } static int rpc_plugin_call_finish_cb(struct blob_buf *blob, int stat, void *priv) { struct call_context *c = priv; int rv = UBUS_STATUS_INVALID_ARGUMENT; if (json_tokener_get_error(c->tok) == json_tokener_success) { if (c->obj) { if (json_object_get_type(c->obj) == json_type_object && blobmsg_add_object(blob, c->obj)) rv = UBUS_STATUS_OK; json_object_put(c->obj); } else { rv = UBUS_STATUS_NO_DATA; } } json_tokener_free(c->tok); free(c->input); free(c->method); return rv; } static int rpc_plugin_call(struct ubus_context *ctx, struct ubus_object *obj, struct ubus_request_data *req, const char *method, struct blob_attr *msg) { int rv = UBUS_STATUS_UNKNOWN_ERROR; struct call_context *c; char *plugin; c = calloc(1, sizeof(*c)); if (!c) goto fail; c->method = strdup(method); c->input = blobmsg_format_json(msg, true); c->tok = json_tokener_new(); if (!c->method || !c->input || !c->tok) goto fail; plugin = c->path + sprintf(c->path, "%s/", 
RPC_PLUGIN_DIRECTORY); if (!rpc_plugin_lookup_plugin(ctx, obj, plugin)) { rv = UBUS_STATUS_NOT_FOUND; goto fail; } c->argv[0] = c->path; c->argv[1] = "call"; c->argv[2] = c->method; return rpc_exec(c->argv, rpc_plugin_call_stdin_cb, rpc_plugin_call_stdout_cb, rpc_plugin_call_stderr_cb, rpc_plugin_call_finish_cb, c, ctx, req); fail: if (c) { if (c->method) free(c->method); if (c->input) free(c->input); if (c->tok) json_tokener_free(c->tok); free(c); } return rv; }
/*
 * process an assertion using the hosted verifier.
 *
 * Fixes vs. the previous version:
 *  - the json_tokener was leaked on the "communication error" path;
 *  - the parsed json_object (jobj) was never json_object_put() — json-c
 *    objects are reference-counted and are NOT apr-pool managed, so every
 *    successful parse leaked the whole reply tree;
 *  - json_tokener_new() is now checked for allocation failure.
 *
 * All strings copied out of jobj are apr_pstrdup'd into r->pool first, so
 * releasing jobj before returning is safe.
 *
 * TODO: local verification
 */
VerifyResult processAssertion(request_rec *r, const char *verifier_url,
                              const char *assertion)
{
  VerifyResult res = apr_pcalloc(r->pool, sizeof(struct _VerifyResult));
  json_tokener *tok = json_tokener_new();
  json_object *jobj = NULL;
  enum json_tokener_error jerr;

  if (!tok) {
    res->errorResponse = apr_psprintf(r->pool, jsonErrorResponse,
                                      "out of memory", "can't allocate json tokener");
    return res;
  }

  char *assertionResult = verifyAssertionRemote(r, verifier_url, (char *)assertion);

  if (assertionResult) {
    jobj = json_tokener_parse_ex(tok, assertionResult, strlen(assertionResult));
    jerr = json_tokener_get_error(tok);

    if (json_tokener_success != jerr) {
      res->errorResponse = apr_psprintf(r->pool, jsonErrorResponse,
                                        "malformed payload", json_tokener_error_desc(jerr));
      json_tokener_free(tok);
      return res;
    }
    ap_log_rerror(APLOG_MARK, APLOG_DEBUG | APLOG_NOERRNO, 0, r,
                  ERRTAG "Assertion (parsed) recieved is : %s",
                  json_object_to_json_string(jobj));
  } else {
    // XXX: verifyAssertionRemote should return specific error message.
    res->errorResponse = apr_psprintf(r->pool, jsonErrorResponse,
                                      "communication error", "can't contact verification server");
    json_tokener_free(tok); /* was leaked here */
    return res;
  }

  struct json_object_iterator it = json_object_iter_begin(jobj);
  struct json_object_iterator itEnd = json_object_iter_end(jobj);
  const char *reason = NULL;
  const char *status = "unknown";
  int success = 0;

  /* Copy every field we care about into r->pool so jobj can be released. */
  while (!json_object_iter_equal(&it, &itEnd)) {
    const char *key = json_object_iter_peek_name(&it);
    json_object *val = json_object_iter_peek_value(&it);

    /* These strncmp lengths include the NUL, so they are exact matches. */
    if (strncmp("email", key, 6) == 0) {
      res->verifiedEmail = apr_pstrdup(r->pool, json_object_get_string(val));
    } else if (strncmp("issuer", key, 7) == 0) {
      res->identityIssuer = apr_pstrdup(r->pool, json_object_get_string(val));
    } else if (strncmp("audience", key, 9) == 0) {
      res->audience = apr_pstrdup(r->pool, json_object_get_string(val));
    } else if (strncmp("expires", key, 8) == 0) {
      /* verifier reports milliseconds since epoch */
      apr_time_ansi_put(&res->expires, json_object_get_int64(val) / 1000);
    } else if (strncmp("reason", key, 7) == 0) {
      reason = json_object_get_string(val);
    } else if (strncmp("status", key, 7) == 0) {
      status = json_object_get_string(val);
      if (strncmp("okay", status, 5) == 0) {
        success = 1;
      }
    }
    json_object_iter_next(&it);
  }

  json_tokener_free(tok);

  /* reason/status point into jobj, so duplicate them before releasing it. */
  if (reason)
    reason = apr_pstrdup(r->pool, reason);
  status = apr_pstrdup(r->pool, status);
  json_object_put(jobj); /* was leaked: json-c objects are refcounted */

  // XXX: This is bad, doesn't catch multiple missing bits
  if (!res->verifiedEmail) {
    res->errorResponse = apr_pstrdup(r->pool, "Missing e-mail in assertion");
  }
  if (!res->identityIssuer) {
    res->errorResponse = apr_pstrdup(r->pool, "Missing issuer in assertion");
  }
  if (res->audience &&
      strncmp(res->audience, r->server->server_hostname,
              strlen(r->server->server_hostname)) != 0) {
    res->errorResponse = apr_psprintf(r->pool, "Audience %s doesn't match %s",
                                      res->audience, r->server->server_hostname);
  }

  apr_time_t now = apr_time_now();
  if (res->expires && res->expires <= now) {
    char exp_time[APR_RFC822_DATE_LEN];
    apr_rfc822_date(exp_time, res->expires);
    res->errorResponse = apr_psprintf(r->pool, "Assertion expired on %s", exp_time);
  }

  if (!success) {
    if (reason) {
      res->errorResponse = apr_pstrdup(r->pool, reason);
    } else {
      res->errorResponse = apr_psprintf(r->pool,
                                        "Assertion failed with status '%s'", status);
    }
  }

  return res;
}