/*
 * bcon-speed benchmark: repeatedly build the document
 * { foo: { bar: { baz: [1, 2, 3] } } } with either BCON_APPEND or the raw
 * bson_append_* API, selected by argv[2].
 *
 * Returns EXIT_FAILURE on bad arguments, 0 otherwise.
 */
int
main (int argc, char *argv[])
{
   long i;
   long n;
   int bcon;
   char *end;
   bson_t bson, foo, bar, baz;

   if (argc != 3) {
      fprintf (stderr,
               "usage: bcon-speed NUM_ITERATIONS [y|n]\n"
               "\n"
               " y = perform speed tests with bcon\n"
               " n = perform speed tests with bson_append\n"
               "\n");
      return EXIT_FAILURE;
   }

   /* strtol instead of atoi so malformed or negative input is rejected
    * instead of silently running zero iterations */
   n = strtol (argv[1], &end, 10);
   if (end == argv[1] || *end != '\0' || n < 0) {
      fprintf (stderr, "NUM_ITERATIONS must be a non-negative integer\n");
      return EXIT_FAILURE;
   }

   bcon = (argv[2][0] == 'y') ? 1 : 0;

   bson_init (&bson);

   for (i = 0; i < n; i++) {
      if (bcon) {
         BCON_APPEND (&bson,
                      "foo", "{",
                         "bar", "{",
                            "baz", "[",
                               BCON_INT32 (1), BCON_INT32 (2), BCON_INT32 (3),
                            "]",
                         "}",
                      "}");
      } else {
         bson_append_document_begin (&bson, "foo", -1, &foo);
         bson_append_document_begin (&foo, "bar", -1, &bar);
         bson_append_array_begin (&bar, "baz", -1, &baz);
         bson_append_int32 (&baz, "0", -1, 1);
         bson_append_int32 (&baz, "1", -1, 2);
         bson_append_int32 (&baz, "2", -1, 3);
         bson_append_array_end (&bar, &baz);
         bson_append_document_end (&foo, &bar);
         bson_append_document_end (&bson, &foo);
      }

      /* reset for the next iteration without reallocating */
      bson_reinit (&bson);
   }

   bson_destroy (&bson);

   return 0;
}
/* Construct the aggregate command in cmd:
 * { aggregate: collname, pipeline: [], cursor: { batchSize: x } } */
static void
_make_command (mongoc_change_stream_t *stream, bson_t *command)
{
   bson_iter_t iter;
   bson_t change_stream_stage; /* { $changeStream: <change_stream_doc> } */
   bson_t change_stream_doc;
   bson_t pipeline;
   bson_t cursor_doc;

   bson_init (command);
   bson_append_utf8 (command,
                     "aggregate",
                     9,
                     stream->coll->collection,
                     stream->coll->collectionlen);
   bson_append_array_begin (command, "pipeline", 8, &pipeline);

   /* Append the $changeStream stage as element "0" of the pipeline */
   bson_append_document_begin (&pipeline, "0", 1, &change_stream_stage);
   bson_append_document_begin (
      &change_stream_stage, "$changeStream", 13, &change_stream_doc);
   /* fullDocument option is always present; resumeAfter only when we have a
    * resume token from a previous getMore/error */
   bson_concat (&change_stream_doc, &stream->full_document);
   if (!bson_empty (&stream->resume_token)) {
      bson_concat (&change_stream_doc, &stream->resume_token);
   }
   bson_append_document_end (&change_stream_stage, &change_stream_doc);
   bson_append_document_end (&pipeline, &change_stream_stage);

   /* Append user pipeline if it exists */
   if (bson_iter_init_find (&iter, &stream->pipeline_to_append, "pipeline") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      bson_iter_t child_iter;
      uint32_t key_int = 1; /* stage "0" is the $changeStream stage above */
      char buf[16];
      const char *key_str;

      BSON_ASSERT (bson_iter_recurse (&iter, &child_iter));
      while (bson_iter_next (&child_iter)) {
         /* The user pipeline may consist of invalid stages or non-documents.
          * Append anyway, and rely on the server error. */
         size_t keyLen =
            bson_uint32_to_string (key_int, &key_str, buf, sizeof (buf));
         bson_append_value (
            &pipeline, key_str, (int) keyLen, bson_iter_value (&child_iter));
         ++key_int;
      }
   }

   bson_append_array_end (command, &pipeline);

   /* Add batch size if needed; the cursor sub-document itself is mandatory */
   bson_append_document_begin (command, "cursor", 6, &cursor_doc);
   if (stream->batch_size > 0) {
      bson_append_int32 (&cursor_doc, "batchSize", 9, stream->batch_size);
   }
   bson_append_document_end (command, &cursor_doc);
}
static void test_create_collection (void) { mongoc_database_t *database; mongoc_collection_t *collection; mongoc_client_t *client; bson_error_t error = { 0 }; bson_t options; bson_t storage_opts; bson_t wt_opts; char *dbname; char *name; bool r; client = mongoc_client_new (gTestUri); assert (client); dbname = gen_collection_name ("dbtest"); database = mongoc_client_get_database (client, dbname); assert (database); bson_free (dbname); bson_init (&options); BSON_APPEND_INT32 (&options, "size", 1234); BSON_APPEND_INT32 (&options, "max", 4567); BSON_APPEND_BOOL (&options, "capped", true); BSON_APPEND_BOOL (&options, "autoIndexId", true); BSON_APPEND_DOCUMENT_BEGIN(&options, "storage", &storage_opts); BSON_APPEND_DOCUMENT_BEGIN(&storage_opts, "wiredtiger", &wt_opts); BSON_APPEND_UTF8(&wt_opts, "configString", "block_compressor=zlib"); bson_append_document_end(&storage_opts, &wt_opts); bson_append_document_end(&options, &storage_opts); name = gen_collection_name ("create_collection"); collection = mongoc_database_create_collection (database, name, &options, &error); assert (collection); bson_destroy (&options); bson_free (name); r = mongoc_collection_drop (collection, &error); assert (r); r = mongoc_database_drop (database, &error); assert (r); mongoc_collection_destroy (collection); mongoc_database_destroy (database); mongoc_client_destroy (client); }
// Find the spend of a specified output_reference within a given blockheight range (main chain only) // if found, load tx and the input that spends it int database_find_blockchain_spend(struct database* db, struct transaction_output_reference* output_reference, size_t start_height, size_t max_height, struct transaction** tx) { mongoc_collection_t* collection = mongoc_client_get_collection(db->client, database_name(db), "transactions"); // Build a query doc bson_t* query = bson_new(); // Build a query that tries to find where this output_reference is spent unsigned char hash[32]; transaction_output_reference_hash(output_reference, hash); bson_t* output_reference_doc = bson_new(); BSON_APPEND_DOCUMENT_BEGIN(query, "inputs.output_reference", output_reference_doc); BSON_APPEND_BINARY(output_reference_doc, "hash", BSON_SUBTYPE_BINARY, (uint8_t*)hash, 32); BSON_APPEND_INT32(output_reference_doc, "index", transaction_output_reference_index(output_reference)); bson_append_document_end(query, output_reference_doc); // Force the height to be valid bson_t* height_doc = bson_new(); BSON_APPEND_DOCUMENT_BEGIN(query, "height", height_doc); BSON_APPEND_INT32(height_doc, "$lte", (int)max_height); BSON_APPEND_INT32(height_doc, "$gte", start_height); bson_append_document_end(query, height_doc); // Perform find mongoc_cursor_t* cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, query, NULL, NULL); bson_error_t error; if(cursor == NULL || mongoc_cursor_error(cursor, &error)) { printf("MongoDB error: %s\n", (cursor == NULL) ? "NULL cursor" : error.message); return -1; } bson_t const* doc; int found = 0; while(mongoc_cursor_next(cursor, &doc) != 0) { if(tx != NULL) { *tx = transaction_from_bson(doc); } found = 1; break; } mongoc_cursor_destroy(cursor); bson_destroy(height_doc); bson_destroy(output_reference_doc); bson_destroy(query); mongoc_collection_destroy(collection); return found; }
void SettingsOutput::SetSubbasinIDs() { bson_t *b = bson_new(); bson_t *child = bson_new(); bson_t *child2 = bson_new(); bson_t *child3 = bson_new(); BSON_APPEND_DOCUMENT_BEGIN(b, "$query", child); BSON_APPEND_DOCUMENT_BEGIN(child, PARAM_FLD_NAME, child2); BSON_APPEND_ARRAY_BEGIN(child2, "$in", child3); BSON_APPEND_UTF8(child3,PARAM_FLD_NAME, VAR_OUTLETID); BSON_APPEND_UTF8(child3,PARAM_FLD_NAME, VAR_SUBBSNID_NUM); bson_append_array_end(child2, child3); bson_append_document_end(child, child2); bson_append_document_end(b, child); //printf("%s\n",bson_as_json(b,NULL)); mongoc_cursor_t *cursor; const bson_t *bsonTable; mongoc_collection_t *collection; collection = mongoc_client_get_collection(m_conn, m_dbName.c_str(), DB_TAB_PARAMETERS); cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, b, NULL, NULL); bson_iter_t iter; while (mongoc_cursor_more(cursor) && mongoc_cursor_next(cursor, &bsonTable)) { string nameTmp = ""; int numTmp = -1; if (bson_iter_init_find(&iter, bsonTable, PARAM_FLD_NAME)) nameTmp = GetStringFromBSONITER(&iter); if (bson_iter_init_find(&iter, bsonTable, PARAM_FLD_VALUE)) numTmp = GetIntFromBSONITER(&iter); if(!StringMatch(nameTmp, "") && numTmp != -1) { if(StringMatch(nameTmp, VAR_OUTLETID)) m_outletID = GetIntFromBSONITER(&iter); else if (StringMatch(nameTmp, VAR_SUBBSNID_NUM)) m_nSubbasins = GetIntFromBSONITER(&iter); } else throw ModelException("SettingOutput","SetSubbasinIDs","No valid values found in MongoDB!"); } bson_destroy(child); bson_destroy(child2); bson_destroy(child3); bson_destroy(b); mongoc_collection_destroy(collection); mongoc_cursor_destroy(cursor); return; }
/*
 * Return true if we build the document, and it's not too big
 * false if there's no way to prevent the doc from being too big. In this
 * case, the caller shouldn't include it with isMaster
 */
bool
_mongoc_metadata_build_doc_with_application (bson_t *doc, const char *appname)
{
   const mongoc_metadata_t *md = &gMongocMetadata;
   bson_t child;

   /* {application: {name: <appname>}} -- only when the user configured one */
   if (appname) {
      BSON_APPEND_DOCUMENT_BEGIN (doc, "application", &child);
      BSON_APPEND_UTF8 (&child, "name", appname);
      bson_append_document_end (doc, &child);
   }

   /* {driver: {name, version}} -- both fields are always present */
   BSON_APPEND_DOCUMENT_BEGIN (doc, "driver", &child);
   BSON_APPEND_UTF8 (&child, "name", md->driver_name);
   BSON_APPEND_UTF8 (&child, "version", md->driver_version);
   bson_append_document_end (doc, &child);

   /* {os: {type, name?, version?, architecture?}} -- only "type" is
    * mandatory; the rest are appended when known */
   BSON_APPEND_DOCUMENT_BEGIN (doc, "os", &child);
   BSON_ASSERT (md->os_type);
   BSON_APPEND_UTF8 (&child, "type", md->os_type);

   if (md->os_name) {
      BSON_APPEND_UTF8 (&child, "name", md->os_name);
   }

   if (md->os_version) {
      BSON_APPEND_UTF8 (&child, "version", md->os_version);
   }

   if (md->os_architecture) {
      BSON_APPEND_UTF8 (&child, "architecture", md->os_architecture);
   }

   bson_append_document_end (doc, &child);

   if (doc->len > METADATA_MAX_SIZE) {
      /* We've done all we can possibly do to ensure the current
       * document is below the maxsize, so if it overflows there is
       * nothing else we can do, so we fail */
      return false;
   }

   /* platform is appended last so it can be truncated to fit (presumably by
    * _append_platform_field -- its definition is not visible here) */
   if (md->platform) {
      _append_platform_field (doc, md->platform);
   }

   return true;
}
/* Build the query {ts: {$gt: <last_time>}} and open a tailable,
 * await-data cursor over the collection. Caller owns the returned cursor. */
static mongoc_cursor_t *
query_collection (mongoc_collection_t *collection, uint32_t last_time)
{
   bson_t query;
   bson_t ts_filter;
   mongoc_cursor_t *cursor;

   BSON_ASSERT (collection);

   bson_init (&query);
   bson_append_document_begin (&query, "ts", 2, &ts_filter);
   bson_append_timestamp (&ts_filter, "$gt", 3, last_time, 0);
   bson_append_document_end (&query, &ts_filter);

   cursor = mongoc_collection_find (collection,
                                    (MONGOC_QUERY_TAILABLE_CURSOR |
                                     MONGOC_QUERY_AWAIT_DATA |
                                     MONGOC_QUERY_SLAVE_OK),
                                    0,
                                    0,
                                    0,
                                    &query,
                                    NULL,
                                    NULL);

   bson_destroy (&query);

   return cursor;
}
/* Remove an origin entry from every realm document that contains it.
 * Returns 0 on success, -1 on error. */
static int mongo_del_origin(u08bits *origin) {
	mongoc_collection_t *collection = mongo_get_collection("realm");
	if (!collection)
		return -1;

	int ret = -1;
	bson_t query, update, pull;

	/* empty query: the $pull applies to every realm document */
	bson_init(&query);

	/* update: { $pull: { origin: <origin> } } */
	bson_init(&update);
	bson_append_document_begin(&update, "$pull", -1, &pull);
	BSON_APPEND_UTF8(&pull, "origin", (const char *)origin);
	bson_append_document_end(&update, &pull);

	if (mongoc_collection_update(collection, MONGOC_UPDATE_MULTI_UPDATE, &query, &update, NULL, NULL)) {
		ret = 0;
	} else {
		TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Error deleting origin information\n");
	}

	mongoc_collection_destroy(collection);
	bson_destroy(&query);
	bson_destroy(&update);
	return ret;
}
/* Add an origin to the given realm's origin set, upserting the realm
 * document when it does not yet exist. NULL realm means the empty realm.
 * Returns 0 on success, -1 on error. */
static int mongo_add_origin(u08bits *origin, u08bits *realm) {
	mongoc_collection_t *collection = mongo_get_collection("realm");
	if (!collection)
		return -1;

	int ret = -1;
	u08bits empty_realm[STUN_MAX_REALM_SIZE + 1] = "\0";
	if (!realm)
		realm = empty_realm;

	bson_t query, update, add_to_set;

	/* query: { realm: <realm> } */
	bson_init(&query);
	BSON_APPEND_UTF8(&query, "realm", (const char *)realm);

	/* update: { $addToSet: { origin: <origin> } } */
	bson_init(&update);
	bson_append_document_begin(&update, "$addToSet", -1, &add_to_set);
	BSON_APPEND_UTF8(&add_to_set, "origin", (const char *)origin);
	bson_append_document_end(&update, &add_to_set);

	if (mongoc_collection_update(collection, MONGOC_UPDATE_UPSERT, &query, &update, NULL, NULL)) {
		ret = 0;
	} else {
		TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Error inserting/updating realm origin information\n");
	}

	mongoc_collection_destroy(collection);
	bson_destroy(&query);
	bson_destroy(&update);
	return ret;
}
static void test_bson_build_child (void) { bson_t b; bson_t child; bson_t *b2; bson_t *child2; bson_init(&b); assert(bson_append_document_begin(&b, "foo", -1, &child)); assert(bson_append_utf8(&child, "bar", -1, "baz", -1)); assert(bson_append_document_end(&b, &child)); b2 = bson_new(); child2 = bson_new(); assert(bson_append_utf8(child2, "bar", -1, "baz", -1)); assert(bson_append_document(b2, "foo", -1, child2)); bson_destroy(child2); assert(b.len == b2->len); assert_bson_equal(&b, b2); bson_destroy(&b); bson_destroy(b2); }
/* Append writeErrors: [{index: 0, code, errmsg, errInfo?}] to doc.
 * errInfo is included only when non-NULL. */
static void
append_write_err (bson_t *doc,
                  uint32_t code,
                  const char *errmsg,
                  size_t errmsg_len,
                  const bson_t *errinfo)
{
   bson_t errors = BSON_INITIALIZER;
   bson_t entry;

   BSON_ASSERT (errmsg);

   bson_append_document_begin (&errors, "0", 1, &entry);
   BSON_APPEND_INT32 (&entry, "index", 0);
   BSON_APPEND_INT32 (&entry, "code", (int32_t) code);
   bson_append_utf8 (&entry, "errmsg", 6, errmsg, (int) errmsg_len);
   if (errinfo) {
      BSON_APPEND_DOCUMENT (&entry, "errInfo", errinfo);
   }
   bson_append_document_end (&errors, &entry);

   bson_append_array (doc, "writeErrors", 11, &errors);
   bson_destroy (&errors);
}
/* Append a single object_t to bson under the given key (of length `length`).
 * Dispatches on the value's dynamic type; returns false for unknown types. */
static bool append_value(bson_t* bson, const char* key, size_t length, object_t* value) {
    switch (value->type) {
        case type_nil:
            bson_append_null(bson, key, length);
            return true;
        case type_bool:
            bson_append_bool(bson, key, length, value->b);
            return true;
        case type_double:
            bson_append_double(bson, key, length, value->d);
            return true;
        case type_str:
            bson_append_utf8(bson, key, length, value->str, value->l);
            return true;
        case type_int:
            append_int(bson, key, length, value->i);
            return true;
        case type_uint:
            append_int(bson, key, length, (int64_t)value->u);
            return true;
        case type_map: {
            /* nested document: open a child, fill it, close it */
            bson_t sub;
            bson_append_document_begin(bson, key, length, &sub);
            append_document(&sub, value);
            bson_append_document_end(bson, &sub);
            return true;
        }
        case type_array: {
            bson_t sub;
            bson_append_array_begin(bson, key, length, &sub);
            append_array(&sub, value);
            bson_append_array_end(bson, &sub);
            return true;
        }
        default:
            return false;
    }
}
// On scope exit, close the sub-array or sub-document this frame opened in
// its parent (each bson_append_*_begin must be paired with the matching
// *_end before the parent can be used again).
~frame() {
    if (is_array) {
        bson_append_array_end(parent, &bson);
    } else {
        bson_append_document_end(parent, &bson);
    }
}
/* Check a correct CONNECT message */ static gboolean sim_parser_connect_test1 (void) { bson_t *bson_connect = bson_new (); bson_t child; uint8_t uuid[]={0x07,0x92,0xd6,0x72,0xf4,0xce,0x11,0xe4,0x9d,0xe2,0x00,0x0c,0x29,0xd9,0x46,0xde}; SimParser *parser = NULL; SimCommand *cmd = NULL; gboolean result = FALSE; bson_append_document_begin (bson_connect,"connect", -1, &child); BSON_APPEND_INT32 (&child, "id", 10); BSON_APPEND_INT32 (&child, "type", SIM_SESSION_TYPE_WEB); BSON_APPEND_UTF8 (&child, "version", "5.0.1"); if (bson_append_binary (&child, "sensor_id", -1, BSON_SUBTYPE_UUID, uuid, 16) == FALSE) return FALSE; bson_append_document_end (bson_connect, &child); /* Check */ bson_iter_t iter; bson_iter_init (&iter, bson_connect); do{ if ((parser = sim_parser_new()) == NULL) { result = FALSE; break; } if ((cmd = sim_parser_bson (parser, bson_get_data (bson_connect), bson_connect->len)) == NULL) { result = FALSE; break; } if (cmd->type != SIM_COMMAND_TYPE_CONNECT) { result = FALSE; break; } if (cmd->data.connect.sensor_ver->major != 5 || cmd->data.connect.sensor_ver->minor != 0 || cmd->data.connect.sensor_ver->micro != 1) { result = FALSE; break; } if (cmd->data.connect.sensor_id == NULL) { result = FALSE; break; } /* Check uuid */ SimUuid * uuidbin = sim_uuid_new_from_bin (uuid); gboolean test = sim_uuid_equal (uuidbin, cmd->data.connect.sensor_id); g_object_unref (uuidbin); if (!test) { result = FALSE; break; } result = TRUE; } while (0); bson_destroy (bson_connect); g_object_unref (parser); return result; }
static gboolean sim_parser_connect_test3 (void) { bson_t *bson_connect = bson_new (); bson_t child; SimParser *parser = NULL; SimCommand *cmd = NULL; gboolean result = FALSE; uint8_t uuid[]={0x07,0x92,0xd6,0x72,0xf4,0xce,0x11,0xe4,0x9d,0xe2,0x00,0x0c,0x29,0xd9,0x46,0xde}; bson_append_document_begin (bson_connect,"connect", -1, &child); BSON_APPEND_INT32 (&child, "id", 10); BSON_APPEND_INT32 (&child, "type", SIM_SESSION_TYPE_WEB); BSON_APPEND_UTF8 (&child, "version", "x.x.x"); if (bson_append_binary (&child, "sensor_id", -1, BSON_SUBTYPE_UUID, uuid, 16) == FALSE) return FALSE; bson_append_document_end (bson_connect, &child); do{ if ((parser = sim_parser_new()) == NULL) { result = FALSE; break; } if ((cmd = sim_parser_bson (parser, bson_get_data (bson_connect), bson_connect->len)) != NULL) { result = FALSE; break; } result = TRUE; } while (0); return result; }
/* Build the filter {ts: {$gt: <last_time>}} and open a tailable,
 * await-data cursor via find_with_opts. Caller owns the returned cursor. */
static mongoc_cursor_t *
query_collection (mongoc_collection_t *collection, uint32_t last_time)
{
   bson_t filter;
   bson_t ts_doc;
   bson_t find_opts;
   mongoc_cursor_t *cursor;

   BSON_ASSERT (collection);

   /* filter: {ts: {$gt: <last_time>}} */
   bson_init (&filter);
   BSON_APPEND_DOCUMENT_BEGIN (&filter, "ts", &ts_doc);
   BSON_APPEND_TIMESTAMP (&ts_doc, "$gt", last_time, 0);
   bson_append_document_end (&filter, &ts_doc);

   /* opts: a tailable cursor that blocks awaiting new data */
   bson_init (&find_opts);
   BSON_APPEND_BOOL (&find_opts, "tailable", true);
   BSON_APPEND_BOOL (&find_opts, "awaitData", true);

   cursor = mongoc_collection_find_with_opts (collection, &filter, &find_opts, NULL);

   bson_destroy (&filter);
   bson_destroy (&find_opts);

   return cursor;
}
/* check a missing mandatory fix */ static gboolean sim_parser_connect_test2 (void) { bson_t *bson_connect = bson_new (); bson_t child; SimParser *parser = NULL; SimCommand *cmd = NULL; gboolean result = FALSE; bson_append_document_begin (bson_connect,"connect", -1, &child); BSON_APPEND_INT32 (&child, "id", 10); BSON_APPEND_INT32 (&child, "type", SIM_SESSION_TYPE_WEB); BSON_APPEND_UTF8 (&child, "version", "5.0.1"); bson_append_document_end (bson_connect, &child); do{ if ((parser = sim_parser_new()) == NULL) { result = FALSE; break; } if ((cmd = sim_parser_bson (parser, bson_get_data (bson_connect), bson_connect->len)) != NULL) { result = FALSE; break; } result = TRUE; } while (0); return result; }
/* Recursively walk `iter` (the aggregation template), filling
 * merged_aggregate_doc by combining existing_aggregate_doc with values from
 * new_doc. `key` is the dotted path accumulated so far ("" at the root). */
void _aggregate_recurse_fill(bson_iter_t *iter, bson_t* new_doc, bson_t* existing_aggregate_doc, bson_t* merged_aggregate_doc, const char *key)
{
    bson_iter_t child_iter;
    bson_t child_doc;

    while (bson_iter_next (iter)) {
        /* length of "<key>.<field>" (or just "<field>" at the root) */
        int new_key_length = strlen(bson_iter_key(iter));
        if (strcmp("", key) != 0) {
            new_key_length += strlen(key) + 1; /* +1 for the '.' separator */
        }

        /* +1 for the NUL terminator -- the previous size overflowed the
         * buffer by one byte on every strcpy/strcat below */
        char new_key[new_key_length + 1];
        if (strcmp("", key) == 0) {
            strcpy(new_key, bson_iter_key(iter));
        } else {
            strcpy(new_key, key);
            strcat(new_key, ".");
            strcat(new_key, bson_iter_key(iter));
        }

        /* _id is carried over unchanged from the existing aggregate */
        if (strcmp("_id", new_key) == 0) {
            bson_value_t *existing_id = _aggregate_get_value_at_key(existing_aggregate_doc, "_id");
            bson_append_value(merged_aggregate_doc, "_id", -1, existing_id);
            continue;
        }

        if (BSON_ITER_HOLDS_DOCUMENT (iter)) {
            const char *agg_key = NULL;
            const bson_value_t *agg_field = NULL;

            /* a sub-document whose single field is an aggregation operator
             * (e.g. {$sum: "$field"}) is evaluated rather than recursed */
            if (bson_iter_recurse (iter, &child_iter)) {
                if (bson_iter_next (&child_iter) && _aggregate_is_agg_operator(bson_iter_key(&child_iter))) {
                    agg_key = bson_iter_key(&child_iter);
                    agg_field = bson_iter_value(&child_iter);
                }
                if (agg_key && !bson_iter_next (&child_iter)) {
                    bson_value_t *existing_value = _aggregate_get_value_at_key(existing_aggregate_doc, new_key);
                    /* agg_field holds "$path"; skip the '$' to get the path */
                    bson_value_t *new_doc_value = _aggregate_get_value_at_key(new_doc, (*agg_field).value.v_utf8.str + 1);
                    bson_value_t *agg_result = _aggregate(existing_value, new_doc_value, agg_key);
                    bson_append_value(merged_aggregate_doc, bson_iter_key(iter), -1, agg_result);
                    continue;
                }
            }

            /* plain sub-document: recurse with the extended dotted path */
            bson_append_document_begin (merged_aggregate_doc, bson_iter_key(iter), -1, &child_doc);
            if (bson_iter_recurse (iter, &child_iter)) {
                _aggregate_recurse_fill (&child_iter, new_doc, existing_aggregate_doc, &child_doc, new_key);
            }
            bson_append_document_end (merged_aggregate_doc, &child_doc);
        } else {
            /* scalar: copy through as-is */
            bson_append_value(merged_aggregate_doc, bson_iter_key(iter), -1, bson_iter_value(iter));
        }
    }
}
// Build a legacy query document: { $query: <query>, $orderby: <orderBy>? }.
// The $orderby clause is appended only when a sort order was given.
TBson TBson::toBson(const QVariantMap &query, const QVariantMap &orderBy)
{
    TBson ret;
    bson_t *root = (bson_t *)ret.data();
    bson_t sub;

    // query clause
    BSON_APPEND_DOCUMENT_BEGIN(root, "$query", &sub);
    appendBson(&sub, query);
    bson_append_document_end(root, &sub);

    // orderBy clause
    if (!orderBy.isEmpty()) {
        BSON_APPEND_DOCUMENT_BEGIN(root, "$orderby", &sub);
        appendBson(&sub, orderBy);
        bson_append_document_end(root, &sub);
    }

    return ret;
}
mongoc_cursor_t * mongoc_database_find_collections (mongoc_database_t *database, const bson_t *filter, bson_error_t *error) { mongoc_cursor_t *cursor; mongoc_read_prefs_t *read_prefs; bson_t cmd = BSON_INITIALIZER; bson_t child; bson_error_t lerror; BSON_ASSERT (database); BSON_APPEND_INT32 (&cmd, "listCollections", 1); if (filter) { BSON_APPEND_DOCUMENT (&cmd, "filter", filter); BSON_APPEND_DOCUMENT_BEGIN (&cmd, "cursor", &child); bson_append_document_end (&cmd, &child); } read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY); cursor = _mongoc_cursor_new (database->client, database->name, MONGOC_QUERY_SLAVE_OK, 0, 0, 0, true, NULL, NULL, NULL, NULL); _mongoc_cursor_cursorid_init (cursor, &cmd); if (_mongoc_cursor_cursorid_prime (cursor)) { /* intentionally empty */ } else { if (mongoc_cursor_error (cursor, &lerror)) { if (lerror.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND) { /* We are talking to a server that doesn' support listCollections. */ /* clear out the error. */ memset (&lerror, 0, sizeof lerror); /* try again with using system.namespaces */ mongoc_cursor_destroy (cursor); cursor = _mongoc_database_find_collections_legacy ( database, filter, error); } else if (error) { memcpy (error, &lerror, sizeof *error); } } } bson_destroy (&cmd); mongoc_read_prefs_destroy (read_prefs); return cursor; }
int database_find_blockchain_transaction(struct database* db, unsigned char* hash, size_t max_height, struct transaction** tx, size_t* height) { mongoc_collection_t* collection = mongoc_client_get_collection(db->client, database_name(db), "transactions"); // Build a query doc bson_t* query = bson_new(); // Set the hash BSON_APPEND_BINARY(query, "hash", BSON_SUBTYPE_BINARY, (uint8_t*)hash, 32); // Force the height to be valid (on the main chain) bson_t* height_doc = bson_new(); BSON_APPEND_DOCUMENT_BEGIN(query, "height", height_doc); BSON_APPEND_INT32(height_doc, "$lte", (int)max_height); BSON_APPEND_INT32(height_doc, "$gte", 0); bson_append_document_end(query, height_doc); // Perform find mongoc_cursor_t* cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, query, NULL, NULL); bson_error_t error; if(cursor == NULL || mongoc_cursor_error(cursor, &error)) { printf("MongoDB error: %s\n", (cursor == NULL) ? "NULL cursor" : error.message); return -1; } bson_t const* doc; int found = 0; while(mongoc_cursor_next(cursor, &doc) != 0) { if(height != NULL) { bson_iter_t iter; if(!bson_iter_init_find(&iter, doc, "height") || !BSON_ITER_HOLDS_INT32(&iter)) { printf("MongoDB error: tx doesn't have height!\n"); return -1; } *height = (size_t)bson_iter_int32(&iter); } if(tx != NULL) { *tx = transaction_from_bson(doc); } found = 1; break; } mongoc_cursor_destroy(cursor); bson_destroy(height_doc); bson_destroy(query); mongoc_collection_destroy(collection); return found; }
/* Set (value > 0) or clear (value == 0) the per-realm option field
 * "options.<opt>". Returns 0 on success, -1 on error. */
static int mongo_set_realm_option_one(u08bits *realm, unsigned long value, const char* opt) {
	mongoc_collection_t *collection = mongo_get_collection("realm");
	if (!collection)
		return -1;

	bson_t query, update, op;

	bson_init(&query);
	BSON_APPEND_UTF8(&query, "realm", (const char *)realm);

	/* build the dotted field name "options.<opt>" */
	size_t key_len = 9 + strlen(opt);
	char *opt_key = (char *)turn_malloc(key_len);
	snprintf(opt_key, key_len, "options.%s", opt);

	/* a positive value sets the option; zero unsets it */
	bson_init(&update);
	bson_append_document_begin(&update, (value > 0) ? "$set" : "$unset", -1, &op);
	BSON_APPEND_INT32(&op, opt_key, (value > 0) ? (int32_t)value : 1);
	bson_append_document_end(&update, &op);

	turn_free(opt_key, key_len);

	int ret = -1;
	if (mongoc_collection_update(collection, MONGOC_UPDATE_MULTI_UPDATE, &query, &update, NULL, NULL)) {
		ret = 0;
	} else {
		TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Error deleting origin information\n");
	}

	mongoc_collection_destroy(collection);
	bson_destroy(&query);
	bson_destroy(&update);
	return ret;
}
/* Append writeConcernErrors: [{code: 64, errmsg, errInfo: {wtimeout: true}}]
 * to doc. Code 64 with wtimeout marks a write-concern timeout. */
static void
append_write_concern_err (bson_t *doc, const char *errmsg, size_t errmsg_len)
{
   bson_t errors = BSON_INITIALIZER;
   bson_t entry;
   bson_t info;

   BSON_ASSERT (errmsg);

   bson_append_document_begin (&errors, "0", 1, &entry);
   BSON_APPEND_INT32 (&entry, "code", 64);
   bson_append_utf8 (&entry, "errmsg", 6, errmsg, (int) errmsg_len);
   BSON_APPEND_DOCUMENT_BEGIN (&entry, "errInfo", &info);
   BSON_APPEND_BOOL (&info, "wtimeout", true);
   bson_append_document_end (&entry, &info);
   bson_append_document_end (&errors, &entry);

   bson_append_array (doc, "writeConcernErrors", 18, &errors);
   bson_destroy (&errors);
}
/* Merge the array `iter` points at into `dest`, re-keying each element to
 * continue dest's numeric key sequence and adding `offset` to each element's
 * "index" field. Only document elements are merged. Returns the number of
 * documents appended. */
int32_t
_mongoc_write_result_merge_arrays (uint32_t offset,
                                   mongoc_write_result_t *result, /* IN */
                                   bson_t *dest,                  /* IN */
                                   bson_iter_t *iter)             /* IN */
{
   const bson_value_t *value;
   bson_iter_t ar;
   bson_iter_t citer;
   int32_t idx;
   int32_t count = 0;
   int32_t aridx;
   bson_t child;
   const char *keyptr = NULL;
   char key[12];
   int len;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (dest);
   BSON_ASSERT (iter);
   BSON_ASSERT (BSON_ITER_HOLDS_ARRAY (iter));

   /* continue numbering after the keys already present in dest */
   aridx = bson_count_keys (dest);

   if (bson_iter_recurse (iter, &ar)) {
      while (bson_iter_next (&ar)) {
         /* non-document elements are skipped */
         if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
             bson_iter_recurse (&ar, &citer)) {
            len =
               (int) bson_uint32_to_string (aridx++, &keyptr, key, sizeof key);
            bson_append_document_begin (dest, keyptr, len, &child);
            while (bson_iter_next (&citer)) {
               if (BSON_ITER_IS_KEY (&citer, "index")) {
                  /* shift the per-batch index to its global position */
                  idx = bson_iter_int32 (&citer) + offset;
                  BSON_APPEND_INT32 (&child, "index", idx);
               } else {
                  /* every other field is copied through unchanged */
                  value = bson_iter_value (&citer);
                  BSON_APPEND_VALUE (&child, bson_iter_key (&citer), value);
               }
            }
            bson_append_document_end (dest, &child);
            count++;
         }
      }
   }

   RETURN (count);
}
/* Append upserted: [{index: 0, _id: upserted_id}] to doc. */
static void
append_upserted (bson_t *doc, const bson_value_t *upserted_id)
{
   bson_t ar = BSON_INITIALIZER;
   bson_t entry;

   bson_append_document_begin (&ar, "0", 1, &entry);
   BSON_APPEND_INT32 (&entry, "index", 0);
   BSON_APPEND_VALUE (&entry, "_id", upserted_id);
   bson_append_document_end (&ar, &entry);

   bson_append_array (doc, "upserted", 8, &ar);
   bson_destroy (&ar);
}
void arrayToBSON(const Array& value, const char* key, bson_t* bson) { bson_t child; bool isDocument = arrayIsDocument(value); if (isDocument) { bson_append_document_begin(bson, key, -1, &child); } else { bson_append_array_begin(bson, key, -1, &child); } fillBSONWithArray(value, &child); if (isDocument) { bson_append_document_end(bson, &child); } else { bson_append_array_end(bson, &child); } }
/* Add (del == 0) or remove (del != 0) a "<kind>_peer_ip" permission entry
 * for a realm. NULL realm means the empty realm. Adding upserts the realm
 * document; removing pulls from every matching document.
 * Returns 0 on success, -1 on error. */
static int mongo_set_permission_ip(const char *kind, u08bits *realm, const char* ip, int del) {

	char field[129];
	snprintf(field, sizeof(field) - 1, "%s_peer_ip", kind);

	mongoc_collection_t *collection = mongo_get_collection("realm");
	if (!collection)
		return -1;

	int ret = -1;
	u08bits empty_realm[STUN_MAX_REALM_SIZE + 1] = "\0";
	if (!realm)
		realm = empty_realm;

	bson_t query, update, op;

	bson_init(&query);
	BSON_APPEND_UTF8(&query, "realm", (const char *)realm);

	/* deletion pulls the ip; addition adds it to the set */
	bson_init(&update);
	bson_append_document_begin(&update, del ? "$pull" : "$addToSet", -1, &op);
	BSON_APPEND_UTF8(&op, field, (const char *)ip);
	bson_append_document_end(&update, &op);

	mongoc_update_flags_t flags =
		del ? MONGOC_UPDATE_MULTI_UPDATE : MONGOC_UPDATE_UPSERT;

	if (mongoc_collection_update(collection, flags, &query, &update, NULL, NULL)) {
		ret = 0;
	} else {
		TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Error inserting permission ip information\n");
	}

	mongoc_collection_destroy(collection);
	bson_destroy(&query);
	bson_destroy(&update);
	return ret;
}
/* Emit a "command started" monitoring (APM) event for a legacy write,
 * synthesizing an equivalent write-command document (with writeConcern
 * {w: 0} appended) so monitoring callbacks see a uniform command shape. */
static void
_mongoc_monitor_legacy_write (mongoc_client_t *client,
                              mongoc_write_command_t *command,
                              const char *db,
                              const char *collection,
                              mongoc_server_stream_t *stream,
                              int64_t request_id)
{
   bson_t doc;
   bson_t wc;
   mongoc_apm_command_started_t event;

   ENTRY;

   /* nothing to do unless the user registered a "started" callback */
   if (!client->apm_callbacks.started) {
      EXIT;
   }

   bson_init (&doc);
   _mongoc_write_command_init (&doc, command, collection);

   /* report writeConcern {w: 0} on the synthesized command */
   BSON_APPEND_DOCUMENT_BEGIN (&doc, "writeConcern", &wc);
   BSON_APPEND_INT32 (&wc, "w", 0);
   bson_append_document_end (&doc, &wc);

   /* append the documents/updates/deletes array for this command */
   _append_array_from_command (command, &doc);

   mongoc_apm_command_started_init (&event,
                                    &doc,
                                    db,
                                    _mongoc_command_type_to_name (command->type),
                                    request_id,
                                    command->operation_id,
                                    &stream->sd->host,
                                    stream->sd->id,
                                    client->apm_context);

   client->apm_callbacks.started (&event);

   mongoc_apm_command_started_cleanup (&event);
   bson_destroy (&doc);
}
/* Append {index: idx, _id: value} to result->upserted, keyed by the running
 * count of upserts appended so far, then bump that count. */
void
_mongoc_write_result_append_upsert (mongoc_write_result_t *result,
                                    int32_t idx,
                                    const bson_value_t *value)
{
   bson_t entry;
   const char *array_key = NULL;
   char keybuf[12];
   int key_len;

   BSON_ASSERT (result);
   BSON_ASSERT (value);

   /* the next array key is the number of entries already appended */
   key_len = (int) bson_uint32_to_string (
      result->upsert_append_count, &array_key, keybuf, sizeof keybuf);

   bson_append_document_begin (&result->upserted, array_key, key_len, &entry);
   BSON_APPEND_INT32 (&entry, "index", idx);
   BSON_APPEND_VALUE (&entry, "_id", value);
   bson_append_document_end (&result->upserted, &entry);

   result->upsert_append_count++;
}
/* Record a legacy write error on the result: set result->error (first error
 * wins), mark the result failed, and merge a synthesized writeErrors entry
 * into result->writeErrors via _mongoc_write_result_merge_arrays. */
static void
_append_write_err_legacy (mongoc_write_result_t *result,
                          const char *err,
                          mongoc_error_domain_t domain,
                          int32_t code,
                          uint32_t offset)
{
   bson_t holder, write_errors, child;
   bson_iter_t iter;

   BSON_ASSERT (code > 0);

   /* only the first error encountered is kept in result->error */
   if (!result->error.domain) {
      bson_set_error (&result->error, domain, (uint32_t) code, "%s", err);
   }

   /* stop processing, if result->ordered */
   result->failed = true;

   /* holder: {"0": [{"index": 0, "code": <code>, "errmsg": <err>}]} --
    * the entry is wrapped in an outer array so it can be handed to
    * merge_arrays through an iterator positioned on the array */
   bson_init (&holder);
   bson_append_array_begin (&holder, "0", 1, &write_errors);
   bson_append_document_begin (&write_errors, "0", 1, &child);

   /* set error's "index" to 0; fixed up in _mongoc_write_result_merge_arrays */
   bson_append_int32 (&child, "index", 5, 0);
   bson_append_int32 (&child, "code", 4, code);
   bson_append_utf8 (&child, "errmsg", 6, err, -1);
   bson_append_document_end (&write_errors, &child);
   bson_append_array_end (&holder, &write_errors);

   /* position the iterator on the array and merge it into the result */
   bson_iter_init (&iter, &holder);
   bson_iter_next (&iter);

   _mongoc_write_result_merge_arrays (
      offset, result, &result->writeErrors, &iter);

   bson_destroy (&holder);
}