/* *----------------------------------------------------------------------- * * get_bson_from_json_file -- * * Open the file at @filename and store its contents in a * bson_t. This function assumes that @filename contains a * single JSON object. * * NOTE: caller owns returned bson_t and must free it. * *----------------------------------------------------------------------- */ bson_t * get_bson_from_json_file(char *filename) { FILE *file; long length; bson_t *data; bson_error_t error; const char *buffer; file = fopen(filename, "r"); if (!file) { return NULL; } /* get file length */ fseek(file, 0, SEEK_END); length = ftell(file); fseek(file, 0, SEEK_SET); if (length < 1) { return NULL; } /* read entire file into buffer */ buffer = (const char *)bson_malloc0(length); if (fread((void *)buffer, 1, length, file) != length) { abort(); } fclose(file); if (!buffer) { return NULL; } /* convert to bson */ data = bson_new_from_json((const uint8_t*)buffer, length, &error); if (!data) { fprintf (stderr, "Cannot parse %s: %s\n", filename, error.message); abort(); } bson_free((void *)buffer); return data; }
int compare_json(const char *json, const char *jsonspec ){ bson_error_t error; bson_error_t error2; bson_t *spec; bson_t *doc; doc = bson_new_from_json (json, -1, &error); spec = bson_new_from_json (jsonspec, -1, &error2); const uint8_t *spec_bson = bson_get_data(spec); const uint8_t *doc_bson = bson_get_data(doc); int yes = compare(spec_bson, spec->len, doc_bson, doc->len); bson_free(spec); bson_free(doc); return yes; }
int main(void) { const uint8_t *json = (uint8_t*)"{ \"message\" : \"Hello World!\" }"; bson_error_t error; bson_t *doc; char *str; size_t len; if (!(doc = bson_new_from_json(json, -1, &error))) { fprintf(stderr, "Cannot initialize BSON structure from JSON example.\n"); fprintf(stderr, "BSON error message: %s\n", error.message); return 1; } printf("Has key \"message\" (expect \"yes\") : %s\n", yesno(bson_has_field(doc, "message"))); printf("Has key \"triceratops\" (expect\"no\") : %s\n", yesno(bson_has_field(doc, "triceratops"))); str = bson_as_json(doc, &len); printf("Original JSON document : %s\n", json); printf("BSON-to-JSON conversion : %s\n", str); bson_free(str); bson_destroy(doc); return 0; }
//valgrind -v --leak-check=full <this> bool test_bson_api(const char *json, const char *jsonspec) { bool same = false; bson_error_t error; bson_error_t error2; bson_t *spec; bson_t *doc; doc = bson_new_from_json (json, -1, &error); spec = bson_new_from_json (jsonspec, -1, &error2); const uint8_t *spec_bson = bson_get_data(spec); const uint8_t *doc_bson = bson_get_data(doc); int yes = compare(spec_bson, spec->len, doc_bson, doc->len); bson_destroy(doc); bson_destroy(spec); return yes; }
int compare_rgx_good(){ bson_error_t error; bson_error_t error2; bson_t *spec; bson_t *doc; const char *json = "{\"hello\": \"world\"}"; const char *jsonspec = "{\"hello\": {\"$options\": \"\", \"$regex\": \"orl\"}}"; doc = bson_new_from_json (json, -1, &error); spec = bson_new_from_json (jsonspec, -1, &error2); const uint8_t *spec_bson = bson_get_data(spec); const uint8_t *doc_bson = bson_get_data(doc); int yes = compare(spec_bson, spec->len, doc_bson, doc->len); bson_free(spec); bson_free(doc); return yes; }
/* Convert an R character scalar containing JSON into an R bson object. */
SEXP R_json_to_bson(SEXP json){
  bson_error_t err;
  bson_t *doc = bson_new_from_json ((uint8_t*) translateCharUTF8(asChar(json)), -1, &err);
  if (doc == NULL) {
    stop(err.message);
  }
  return bson2r(doc);
}
int complex_search(){ bson_error_t error; bson_error_t error2; bson_t *spec; bson_t *doc; const char *json = "{\"a\": [{\"b\": [1, 2]}, {\"b\": [3, 5]}], \"c\": {\"d\": \"56b2cd28e138237eb96ff936\"}}"; const char *jsonspec = "{\"$and\": [{\"$or\": [{\"c.d\": \"56b2cd28e138237eb96ff936\"}]}, {\"$or\": [{\"a.0.b\": {\"$in\": [5, 30, 99]}}, {\"a.1.b\": {\"$in\": [5, 30, 99]}}]}]}"; doc = bson_new_from_json (json, -1, &error); spec = bson_new_from_json (jsonspec, -1, &error2); const uint8_t *spec_bson = bson_get_data(spec); const uint8_t *doc_bson = bson_get_data(doc); int yes = compare(spec_bson, spec->len, doc_bson, doc->len); BSON_ASSERT(yes); bson_free(spec); bson_free(doc); return yes; }
int telemetry_mongo_insert(centernode_t *cn, cnaccess_t* ca, cdnmsg_t *req) { mongoc_collection_t *collection; bson_error_t error; bson_t *doc; char docbuf[256] = {0}; #pragma pack(1) struct Info_s { uint16_t addr; uint32_t status; } *pInfo; #pragma pack(4) int size = (ntohl(req->pdu.len)-23)/6; uint8_t mytime[7]; memcpy ((void*)mytime, ((char*)&req->pdu + ntohl(req->pdu.len)-7), 7); pInfo = (struct Info_s*)&req->pdu.body.telemetry.addr; int i = 0; for (i=0; i<size; i++ ) { sprintf (docbuf, "{\"type\":%d,\"nodeid\":%d,\"addr\":%d,\"data\":%d,\"time\":\"20%02d-%02d-%02d %02d:%02d:%02d.%03d\"}", ntohs(req->pdu.cmd), ntohs(req->pdu.body.login.nodeid), ntohs(pInfo->addr), ntohl(pInfo->status), mytime[0], mytime[1], mytime[2], mytime[3], mytime[4], mytime[5], mytime[6]); doc = bson_new_from_json((uint8_t*)docbuf, strlen(docbuf), &error); if (strcmp (error.message, "") != 0) { fprintf (stderr, "%s\n", error.message); return 0; } // collection = getValidColl (); char szaddr[128] = {0}; sprintf (szaddr, "%x_%x_%x", ntohs(req->pdu.body.login.nodeid),ntohs(pInfo->addr),ntohs(req->pdu.cmd)); pthread_mutex_lock (&cn->Mongo[0].MongoLock); // collection = cn->Mongo[0].MongoCollection; collection = mongoc_client_get_collection (cn->Mongo[0].MongoClient, "mydb", szaddr); if (!mongoc_collection_insert (collection, MONGOC_INSERT_NONE, doc, NULL, &error)) { fprintf (stderr, "%s\n", error.message); } mongoc_collection_destroy (collection); pthread_mutex_unlock (&cn->Mongo[0].MongoLock); bson_destroy (doc); pInfo++; } return 0; }
// Parse a JSON string into an owned BSON document::value.
// Throws exception(error_code::k_json_parse_failure) on invalid JSON.
document::value from_json(stdx::string_view json) {
    bson_error_t parse_err;
    const auto* bytes = reinterpret_cast<const uint8_t*>(json.data());
    bson_t* parsed = bson_new_from_json(bytes, json.size(), &parse_err);
    if (parsed == nullptr) {
        throw exception(error_code::k_json_parse_failure, parse_err.message);
    }
    // Steal the raw buffer out of the bson_t; the returned value takes
    // ownership and releases it via bson_free_deleter.
    std::uint32_t buf_len;
    std::uint8_t* stolen = bson_destroy_with_steal(parsed, true, &buf_len);
    return document::value{stolen, buf_len, bson_free_deleter};
}
// Parse a JSON string into an owned BSON document::value.
// Returns stdx::nullopt instead of throwing when the JSON is invalid.
stdx::optional<document::value> from_json(stdx::string_view json) {
    bson_error_t parse_err;
    const auto* bytes = reinterpret_cast<const uint8_t*>(json.data());
    bson_t* parsed = bson_new_from_json(bytes, json.size(), &parse_err);
    if (parsed == nullptr) {
        return stdx::nullopt;
    }
    // Steal the raw buffer out of the bson_t; the returned value takes
    // ownership and releases it via bson_free_deleter.
    std::uint32_t buf_len;
    std::uint8_t* stolen = bson_destroy_with_steal(parsed, true, &buf_len);
    return document::value{stolen, buf_len, bson_free_deleter};
}
/*
 * R_mongo_collection_insert_page --
 *      Insert a character vector of JSON documents into a collection via
 *      a single bulk operation.  @stop_on_error ("ordered") selects
 *      serial execution and turns a partial failure into an R error
 *      instead of a warning.  Returns the bulk reply as an R list.
 */
SEXP R_mongo_collection_insert_page(SEXP ptr_col, SEXP json_vec, SEXP stop_on_error){
  if(!Rf_isString(json_vec) || !Rf_length(json_vec))
    stop("json_vec must be character string of at least length 1");

  //ordered means serial execution
  bool ordered = Rf_asLogical(stop_on_error);

  //create bulk operation
  bson_error_t err;
  bson_t *b;
  bson_t reply;
  mongoc_bulk_operation_t *bulk = mongoc_collection_create_bulk_operation_with_opts (r2col(ptr_col), NULL);
  for(int i = 0; i < Rf_length(json_vec); i++){
    b = bson_new_from_json ((uint8_t*) Rf_translateCharUTF8(Rf_asChar(STRING_ELT(json_vec, i))), -1, &err);
    if(!b){
      mongoc_bulk_operation_destroy (bulk);
      /* BUG FIX: never pass server-provided text as the format string */
      stop("%s", err.message);
    }
    /* the bulk op copies the document, so it can be destroyed right away */
    mongoc_bulk_operation_insert(bulk, b);
    bson_destroy (b);
    b = NULL;
  }

  //execute bulk operation
  bool success = mongoc_bulk_operation_execute (bulk, &reply, &err);
  mongoc_bulk_operation_destroy (bulk);

  //check for errors
  if(!success){
    if(ordered){
      /* BUG FIX: err.message may contain '%' — format-string bug */
      Rf_errorcall(R_NilValue, "%s", err.message);
    } else {
      Rf_warningcall(R_NilValue, "Not all inserts were successful: %s\n", err.message);
    }
  }

  //get output
  SEXP out = PROTECT(bson2list(&reply));
  bson_destroy (&reply);
  UNPROTECT(1);
  return out;
}
/*
 * cbson_from_json --
 *      Lua binding: convert the JSON string at stack index 1 into raw
 *      BSON bytes, pushed as a Lua string.  Raises a Lua error with the
 *      parser's message when the JSON does not parse.
 */
int cbson_from_json(lua_State *L)
{
  bson_t *bson;
  size_t len;
  bson_error_t error;
  const uint8_t *json = (uint8_t *)luaL_checklstring(L, 1, &len);

  bson = bson_new_from_json(json, len, &error);
  if (bson == NULL) {
    /* BUG FIX: luaL_error treats its 2nd arg as a printf format —
     * never feed it the raw error message */
    return luaL_error(L, "%s", error.message);
  }

  /* renamed from `data`, which shadowed the input pointer */
  const uint8_t *bson_data = bson_get_data(bson);
  lua_pushlstring(L, (const char *)bson_data, bson->len);
  bson_destroy(bson);
  return 1;
}
/*
 * telecommunicating_mongo_insert --
 *      Build one JSON document from the telemetry-request fields of @req
 *      and insert it into MongoDB.  The collection name is derived from
 *      nodeid/addr/cmd.  Always returns 0.
 */
int telecommunicating_mongo_insert (centernode_t *cn, cnaccess_t *ca, cdnmsg_t *req)
{
   mongoc_collection_t *collection;
   bson_error_t error;
   bson_t *doc;
   char docbuf[256] = {0};

   /* snprintf instead of sprintf: guard against buffer overflow */
   snprintf (docbuf, sizeof docbuf,
             "{\"type\":%d,\"nodeid\":%d,\"addr\":%d,\"switch\":%d,\"time\":\"20%02d-%02d-%02d %02d:%02d:%02d.%03d\"}",
             ntohs(req->pdu.cmd),
             ntohs(req->pdu.body.login.nodeid),
             ntohs(req->pdu.telemetryreq.addr),
             ntohl(req->pdu.telemetryreq.status),
             (uint8_t)req->pdu.telemetryreq.time[0],
             (uint8_t)req->pdu.telemetryreq.time[1],
             (uint8_t)req->pdu.telemetryreq.time[2],
             (uint8_t)req->pdu.telemetryreq.time[3],
             (uint8_t)req->pdu.telemetryreq.time[4],
             (uint8_t)req->pdu.telemetryreq.time[5],
             (uint8_t)req->pdu.telemetryreq.time[6]);

   doc = bson_new_from_json((uint8_t *)docbuf, strlen(docbuf), &error);
   if (!doc) {
      /*
       * BUG FIX: the old code inspected error.message after a
       * *successful* parse, where bson_error_t is uninitialized
       * (undefined behavior).  Only a NULL return indicates failure.
       */
      fprintf (stderr, "%s\n", error.message);
      return 0;
   }

   char szaddr[128] = {0};
   snprintf (szaddr, sizeof szaddr, "%x_%x_%x",
             ntohs(req->pdu.body.login.nodeid),
             ntohs(req->pdu.telemetryreq.addr),
             ntohs(req->pdu.cmd));

   /* the Mongo client handle is shared; serialize access */
   pthread_mutex_lock (&cn->Mongo[0].MongoLock);
   collection = mongoc_client_get_collection (cn->Mongo[0].MongoClient, "mydb", szaddr);
   if (!mongoc_collection_insert (collection, MONGOC_INSERT_NONE, doc, NULL, &error)) {
      fprintf (stderr, "%s\n", error.message);
   }
   mongoc_collection_destroy (collection);
   pthread_mutex_unlock (&cn->Mongo[0].MongoLock);
   bson_destroy (doc);
   return 0;
}
/*
 * test_mongoc_matcher_logic_ops --
 *      Table-driven test of $or / $and handling in mongoc_matcher.
 *      Each entry is {matcher spec JSON, document JSON, expected match};
 *      any parse failure or wrong match result aborts the test run.
 */
static void
test_mongoc_matcher_logic_ops (void)
{
   /* {spec, doc, expected result} */
   logic_op_test_t tests[] = {
      {"{\"$or\": [{\"a\": 1}, {\"b\": 2}]}", "{\"a\": 1}", true},
      {"{\"$or\": [{\"a\": 1}, {\"b\": 2}]}", "{\"b\": 2}", true},
      {"{\"$or\": [{\"a\": 1}, {\"b\": 2}]}", "{\"a\": 3}", false},
      {"{\"$or\": [{\"a\": {\"$gt\": 1}}, {\"a\": {\"$lt\": -1}}]}",
       "{\"a\": 3}",
       true},
      {"{\"$or\": [{\"a\": {\"$gt\": 1}}, {\"a\": {\"$lt\": -1}}]}",
       "{\"a\": -2}",
       true},
      {"{\"$or\": [{\"a\": {\"$gt\": 1}}, {\"a\": {\"$lt\": -1}}]}",
       "{\"a\": 0}",
       false},
      {"{\"$and\": [{\"a\": 1}, {\"b\": 2}]}", "{\"a\": 1, \"b\": 2}", true},
      {"{\"$and\": [{\"a\": 1}, {\"b\": 2}]}", "{\"a\": 1, \"b\": 1}", false},
      {"{\"$and\": [{\"a\": 1}, {\"b\": 2}]}", "{\"a\": 1}", false},
      {"{\"$and\": [{\"a\": 1}, {\"b\": 2}]}", "{\"b\": 2}", false},
      {"{\"$and\": [{\"a\": {\"$gt\": -1}}, {\"a\": {\"$lt\": 1}}]}",
       "{\"a\": 0}",
       true},
      {"{\"$and\": [{\"a\": {\"$gt\": -1}}, {\"a\": {\"$lt\": 1}}]}",
       "{\"a\": -2}",
       false},
      {"{\"$and\": [{\"a\": {\"$gt\": -1}}, {\"a\": {\"$lt\": 1}}]}",
       "{\"a\": 1}",
       false},
   };
   int n_tests = sizeof tests / sizeof (logic_op_test_t);
   int i;
   logic_op_test_t test;
   bson_t *spec;
   bson_error_t error;
   mongoc_matcher_t *matcher;
   bson_t *doc;
   bool r;

   for (i = 0; i < n_tests; i++) {
      test = tests[i];

      /* parse the matcher spec; abort loudly on bad test fixtures */
      spec = bson_new_from_json ((uint8_t *) test.spec, -1, &error);
      if (!spec) {
         fprintf (stderr,
                  "couldn't parse JSON query:\n\n%s\n\n%s\n",
                  test.spec,
                  error.message);
         abort ();
      }

      matcher = mongoc_matcher_new (spec, &error);
      BSON_ASSERT (matcher);

      /* parse the document being matched */
      doc = bson_new_from_json ((uint8_t *) test.doc, -1, &error);
      if (!doc) {
         fprintf (stderr,
                  "couldn't parse JSON document:\n\n%s\n\n%s\n",
                  test.doc,
                  error.message);
         abort ();
      }

      /* run the matcher and compare against the expected result */
      r = mongoc_matcher_match (matcher, doc);
      if (test.match != r) {
         fprintf (stderr,
                  "query:\n\n%s\n\nshould %shave matched:\n\n%s\n",
                  test.match ? "" : "not ",
                  test.spec,
                  test.doc);
         abort ();
      }

      mongoc_matcher_destroy (matcher);
      bson_destroy (doc);
      bson_destroy (spec);
   }
}
void test_bson_type_decimal128 (const uint8_t *bson_str, uint32_t bson_str_len, const uint8_t *canonical_bson_str, uint32_t canonical_bson_str_len, const uint8_t *extjson_str, uint32_t extjson_str_len, const uint8_t *canonical_extjson_str, uint32_t canonical_extjson_str_len, bool lossy) { char bson_string[BSON_DECIMAL128_STRING]; char json_string[BSON_DECIMAL128_STRING]; bson_t bson; bson_t canonical_bson; bson_t *extjson; bson_t *canonical_extjson; bson_decimal128_t bson_decimal128; bson_decimal128_t json_decimal128; bson_iter_t iter; char *str1, *str2; bson_error_t error; BSON_ASSERT (bson_str); BSON_ASSERT (canonical_bson_str); BSON_ASSERT (extjson_str); BSON_ASSERT (canonical_extjson_str); bson_init_static (&bson, bson_str, bson_str_len); bson_init_static (&canonical_bson, canonical_bson_str, canonical_bson_str_len); ASSERT_CMPUINT8 (bson_get_data (&bson), bson_str); /* B->cB */ ASSERT_CMPUINT8 (bson_get_data (&bson), canonical_bson_str); /* cB->cB */ ASSERT_CMPUINT8 (bson_get_data (&canonical_bson), canonical_bson_str); extjson = bson_new_from_json (extjson_str, extjson_str_len, &error); canonical_extjson = bson_new_from_json (canonical_extjson_str, canonical_extjson_str_len, &error); /* B->cE */ ASSERT_CMPJSON (bson_as_json (&bson, NULL), canonical_extjson_str); /* E->cE */ ASSERT_CMPJSON (bson_as_json (extjson, NULL), canonical_extjson_str); /* cb->cE */ ASSERT_CMPUINT8 (bson_get_data (&canonical_bson), bson_get_data (canonical_extjson)); /* cE->cE */ ASSERT_CMPJSON (bson_as_json (canonical_extjson, NULL), canonical_extjson_str); if (!lossy) { /* E->cB */ ASSERT_CMPJSON (bson_as_json (extjson, NULL), canonical_extjson_str); /* cE->cB */ ASSERT_CMPUINT8 (bson_get_data (canonical_extjson), canonical_bson_str); } bson_destroy (extjson); bson_destroy (canonical_extjson); }
/**
 * Main module procedure.
 * @param[in] instance Module instance.
 * @param[in] request Radius request.
 * @return RLM_MODULE_OK on a successful find-and-modify, RLM_MODULE_FAIL otherwise.
 */
static rlm_rcode_t mod_proc(void *instance, REQUEST *request)
{
	rlm_mongodb_t *inst = instance;
	rlm_mongodb_conn_t *conn = NULL;
	rlm_rcode_t code = RLM_MODULE_FAIL;
	bson_error_t error;

	conn = fr_connection_get(inst->pool);
	if (!conn) {
		goto end;
	}

	if (inst->action == RLM_MONGODB_GET) {
		// TODO: implement me!
		code = RLM_MODULE_FAIL;
	} else {
		mongoc_collection_t *mongo_collection = NULL;
		char *db = NULL, *collection = NULL, *query = NULL, *sort = NULL, *update = NULL;
		bson_t *bson_query = NULL, *bson_sort = NULL, *bson_update = NULL;

		if (tmpl_aexpand(request, &db, request, inst->cfg.db, NULL, NULL) < 0) {
			ERROR("failed to substitute attributes for db '%s'", inst->cfg.db->name);
			goto end_set;
		}

		if (tmpl_aexpand(request, &collection, request, inst->cfg.collection, NULL, NULL) < 0) {
			ERROR("failed to substitute attributes for collection '%s'", inst->cfg.collection->name);
			goto end_set;
		}

		ssize_t query_len = tmpl_aexpand(request, &query, request, inst->cfg.search_query, NULL, NULL);
		if (query_len < 0) {
			ERROR("failed to substitute attributes for search query '%s'", inst->cfg.search_query->name);
			goto end_set;
		}
		bson_query = bson_new_from_json((uint8_t *) query, query_len, &error);
		if (!bson_query) {
			RERROR("JSON->BSON conversion failed for search query '%s': %d.%d %s",
			       query, error.domain, error.code, error.message);
			goto end_set;
		}

		ssize_t sort_len = tmpl_aexpand(request, &sort, request, inst->cfg.sort_query, NULL, NULL);
		/* BUG FIX: was testing query_len here (copy-paste) */
		if (sort_len < 0) {
			ERROR("failed to substitute attributes for sort query '%s'", inst->cfg.sort_query->name);
			goto end_set;
		}
		if (sort_len) {
			bson_sort = bson_new_from_json((uint8_t *) sort, sort_len, &error);
			if (!bson_sort) {
				RERROR("JSON->BSON conversion failed for sort query '%s': %d.%d %s",
				       sort, error.domain, error.code, error.message);
				goto end_set;
			}
		}

		ssize_t update_len = tmpl_aexpand(request, &update, request, inst->cfg.update_query, NULL, NULL);
		/* BUG FIX: was testing query_len here (copy-paste) */
		if (update_len < 0) {
			ERROR("failed to substitute attributes for update query '%s'", inst->cfg.update_query->name);
			goto end_set;
		}
		if (update_len) {
			bson_update = bson_new_from_json((uint8_t *) update, update_len, &error);
			if (!bson_update) {
				RERROR("JSON->BSON conversion failed for update query '%s': %d.%d %s",
				       update, error.domain, error.code, error.message);
				goto end_set;
			}
		}

		mongo_collection = mongoc_client_get_collection(conn->client, db, collection);
		if (!mongo_collection) {
			RERROR("failed to get collection %s/%s", db, collection);
			goto end_set;
		}

		bool ok = mongoc_collection_find_and_modify(mongo_collection, bson_query, bson_sort,
							    bson_update, NULL, inst->cfg.remove,
							    inst->cfg.upsert, false, NULL, &error);
		code = ok ? RLM_MODULE_OK : RLM_MODULE_FAIL;

	end_set:
		if (mongo_collection) mongoc_collection_destroy(mongo_collection);
		if (bson_query) bson_destroy(bson_query);
		if (bson_sort) bson_destroy(bson_sort);
		if (bson_update) bson_destroy(bson_update);
	}

end:
	if (conn) fr_connection_release(inst->pool, conn);
	return code;
}
/*
 * aggregate_group --
 *      Apply a $group-style aggregation (parsed from @document_json) to
 *      every document in @state, replacing state->docs with the grouped
 *      result set and state->docs_len with its length.
 *
 *      NOTE(review): group_document is never destroyed, and the
 *      bson_value_t results from the _aggregate_* helpers plus the old
 *      state->docs array also appear to be leaked — ownership of those
 *      helpers' results is not visible here, so confirm before fixing.
 */
void aggregate_group(aggregation_state_t *state, char *document_json) {
  bson_t *group_document;
  bson_error_t error;

  group_document = bson_new_from_json (document_json, -1, &error);
  if (!group_document) {
    //TODO: Handle error
    return;
  }

  // Step 1: Find the _specification of the _id from the group document
  bson_value_t *id_spec = _aggregate_get_value_at_key(group_document, "_id");

  // Result set can never be larger than the input set (VLA sized accordingly).
  bson_t *new_docs[(*state).docs_len];
  int new_docs_len = 0;

  // Step 2: Aggregate each document in the state
  size_t index;
  for(index = 0; index < (*state).docs_len; index++) {
    // Step 2a: Create a copy of the _id spec, with replacements from the source document
    bson_t orig_doc;
    orig_doc = (*state).docs[index];
    bson_value_t *id_value = _aggregate_group_id_replace(id_spec, &orig_doc);

    // Step 2b: Figure out if this _id already exists in the result set and create a new one
    // if it doesn't
    bson_t *aggregate_doc = NULL;
    size_t aggregate_doc_index = 0;
    size_t new_docs_index;
    for(new_docs_index = 0; new_docs_index < new_docs_len; new_docs_index++) {
      bson_t *new_doc = new_docs[new_docs_index];
      if (_aggregate_have_same_id(new_doc, id_value)) {
        aggregate_doc = new_doc;
        aggregate_doc_index = new_docs_index;
        break;
      }
    }
    if (aggregate_doc == NULL) {
      // No bucket for this _id yet: start a new result document.
      bson_t *doc = bson_new();
      bson_append_value(doc, "_id", -1, id_value);
      aggregate_doc = doc;
      aggregate_doc_index = new_docs_len;
      new_docs[new_docs_len++] = aggregate_doc;
    }

    // Step 2c: Build rest of the result document using the current result and
    // group document
    bson_iter_t iter;
    if (!bson_iter_init (&iter, group_document)) {
      //TODO: Handle error
      continue;
    }
    // Merge accumulators into a fresh document and swap it into the bucket.
    bson_t *merged_aggregate_doc = bson_new();
    _aggregate_recurse_fill(&iter, &orig_doc, aggregate_doc, merged_aggregate_doc, "");
    new_docs[aggregate_doc_index] = merged_aggregate_doc;
  }

  // Copy the grouped documents (by value) into a heap array that becomes
  // the state's new document list.
  size_t result_index = 0;
  size_t new_docs_index = 0;
  bson_t *result_docs = malloc(new_docs_len * sizeof(bson_t));
  for(new_docs_index = 0; new_docs_index < new_docs_len; new_docs_index++) {
    bson_t *new_doc = new_docs[new_docs_index];
    result_docs[result_index] = *new_doc;
    result_index++;
  }
  (*state).docs = result_docs;
  (*state).docs_len = result_index;
}