mongoc_cursor_t * _mongoc_cursor_new (mongoc_client_t *client, const char *db_and_collection, mongoc_query_flags_t flags, uint32_t skip, uint32_t limit, uint32_t batch_size, bool is_command, const bson_t *query, const bson_t *fields, const mongoc_read_prefs_t *read_prefs) { mongoc_read_mode_t mode; mongoc_cursor_t *cursor; const bson_t *tags; const char *mode_str; bson_t child; ENTRY; BSON_ASSERT(client); BSON_ASSERT(db_and_collection); BSON_ASSERT(query); /* we can't have exhaust queries with limits */ BSON_ASSERT (!((flags & MONGOC_QUERY_EXHAUST) && limit)); /* we can't have exhaust queries with sharded clusters */ BSON_ASSERT (!((flags & MONGOC_QUERY_EXHAUST) && client->cluster.isdbgrid)); /* * Cursors execute their query lazily. This sadly means that we must copy * some extra data around between the bson_t structures. This should be * small in most cases, so it reduces to a pure memcpy. The benefit to this * design is simplified error handling by API consumers. */ cursor = bson_malloc0(sizeof *cursor); cursor->client = client; bson_strcpy_w_null(cursor->ns, db_and_collection, sizeof cursor->ns); cursor->nslen = (uint32_t)strlen(cursor->ns); cursor->flags = flags; cursor->skip = skip; cursor->limit = limit; cursor->batch_size = batch_size; cursor->is_command = is_command; if (!bson_has_field (query, "$query")) { bson_init (&cursor->query); bson_append_document (&cursor->query, "$query", 6, query); } else { bson_copy_to (query, &cursor->query); } if (read_prefs) { cursor->read_prefs = mongoc_read_prefs_copy (read_prefs); mode = mongoc_read_prefs_get_mode (read_prefs); tags = mongoc_read_prefs_get_tags (read_prefs); if (mode != MONGOC_READ_PRIMARY) { flags |= MONGOC_QUERY_SLAVE_OK; if ((mode != MONGOC_READ_SECONDARY_PREFERRED) || tags) { bson_append_document_begin (&cursor->query, "$readPreference", 15, &child); mode_str = _mongoc_cursor_get_read_mode_string (mode); bson_append_utf8 (&child, "mode", 4, mode_str, -1); if (tags) { bson_append_array (&child, "tags", 
4, tags); } bson_append_document_end (&cursor->query, &child); } } } if (fields) { bson_copy_to(fields, &cursor->fields); } else { bson_init(&cursor->fields); } _mongoc_buffer_init(&cursor->buffer, NULL, 0, NULL); mongoc_counter_cursors_active_inc(); RETURN(cursor); }
mongoc_cursor_t * _mongoc_cursor_new (mongoc_client_t *client, const char *db_and_collection, mongoc_query_flags_t flags, uint32_t skip, uint32_t limit, uint32_t batch_size, bool is_command, const bson_t *query, const bson_t *fields, const mongoc_read_prefs_t *read_prefs) { mongoc_read_prefs_t *local_read_prefs = NULL; mongoc_read_mode_t mode; mongoc_cursor_t *cursor; const bson_t *tags; bson_iter_t iter; const char *key; const char *mode_str; bson_t child; bool found = false; int i; ENTRY; BSON_ASSERT (client); BSON_ASSERT (db_and_collection); BSON_ASSERT (query); if (!read_prefs) { read_prefs = client->read_prefs; } cursor = bson_malloc0 (sizeof *cursor); /* * CDRIVER-244: * * If this is a command, we need to verify we can send it to the location * specified by the read preferences. Otherwise, log a warning that we * are rerouting to the primary instance. */ if (is_command && read_prefs && (mongoc_read_prefs_get_mode (read_prefs) != MONGOC_READ_PRIMARY) && bson_iter_init (&iter, query) && bson_iter_next (&iter) && (key = bson_iter_key (&iter))) { for (i = 0; gSecondaryOkCommands [i]; i++) { if (0 == strcasecmp (key, gSecondaryOkCommands [i])) { found = true; break; } } if (!found) { cursor->redir_primary = true; local_read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY); read_prefs = local_read_prefs; MONGOC_WARNING ("Database command \"%s\" rerouted to primary node", key); } } /* * Cursors execute their query lazily. This sadly means that we must copy * some extra data around between the bson_t structures. This should be * small in most cases, so it reduces to a pure memcpy. The benefit to this * design is simplified error handling by API consumers. 
*/ cursor->client = client; bson_strncpy (cursor->ns, db_and_collection, sizeof cursor->ns); cursor->nslen = (uint32_t)strlen(cursor->ns); cursor->flags = flags; cursor->skip = skip; cursor->limit = limit; cursor->batch_size = batch_size; cursor->is_command = is_command; /* we can't have exhaust queries with limits */ if ((flags & MONGOC_QUERY_EXHAUST) && limit) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot specify MONGOC_QUERY_EXHAUST and set a limit."); cursor->failed = true; cursor->done = true; cursor->end_of_event = true; cursor->sent = true; GOTO (finish); } /* we can't have exhaust queries with sharded clusters */ if ((flags & MONGOC_QUERY_EXHAUST) && client->cluster.isdbgrid) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot specify MONGOC_QUERY_EXHAUST with sharded cluster."); cursor->failed = true; cursor->done = true; cursor->end_of_event = true; cursor->sent = true; GOTO (finish); } if (!cursor->is_command && !bson_has_field (query, "$query")) { bson_init (&cursor->query); bson_append_document (&cursor->query, "$query", 6, query); } else { bson_copy_to (query, &cursor->query); } if (read_prefs) { cursor->read_prefs = mongoc_read_prefs_copy (read_prefs); mode = mongoc_read_prefs_get_mode (read_prefs); tags = mongoc_read_prefs_get_tags (read_prefs); if (mode != MONGOC_READ_PRIMARY) { flags |= MONGOC_QUERY_SLAVE_OK; if ((mode != MONGOC_READ_SECONDARY_PREFERRED) || tags) { bson_append_document_begin (&cursor->query, "$readPreference", 15, &child); mode_str = _mongoc_cursor_get_read_mode_string (mode); bson_append_utf8 (&child, "mode", 4, mode_str, -1); if (tags) { bson_append_array (&child, "tags", 4, tags); } bson_append_document_end (&cursor->query, &child); } } } if (fields) { bson_copy_to(fields, &cursor->fields); } else { bson_init(&cursor->fields); } _mongoc_buffer_init(&cursor->buffer, NULL, 0, NULL); finish: mongoc_counter_cursors_active_inc(); if 
(local_read_prefs) { mongoc_read_prefs_destroy (local_read_prefs); } RETURN (cursor); }
/* Update result with the read prefs, following Server Selection Spec.
 * The driver must have discovered the server is a mongos.
 *
 * Sets MONGOC_QUERY_SLAVE_OK in result->flags and/or rewrites the query as
 * {$query: ..., $readPreference: {...}} per the mode rules quoted below.
 * When the query is rewritten, result->query_with_read_prefs is a new bson_t
 * and result->query_owned is set so the caller knows to destroy it. */
static void
_apply_read_preferences_mongos (const mongoc_read_prefs_t *read_prefs,
                                const bson_t *query_bson,
                                mongoc_apply_read_prefs_result_t *result /* OUT */)
{
   mongoc_read_mode_t mode;
   const bson_t *tags = NULL;
   bson_t child;
   const char *mode_str;
#ifdef MONGOC_EXPERIMENTAL_FEATURES
   int64_t max_staleness_ms;
#endif

   /* FIX: the previous code called mongoc_read_prefs_get_mode() before the
    * NULL check guarding get_tags(). Treat a NULL read_prefs explicitly as
    * PRIMARY rather than relying on the getter tolerating NULL. */
   if (read_prefs) {
      mode = mongoc_read_prefs_get_mode (read_prefs);
      tags = mongoc_read_prefs_get_tags (read_prefs);
   } else {
      mode = MONGOC_READ_PRIMARY;
   }

   /* Server Selection Spec says:
    *
    * For mode 'primary', drivers MUST NOT set the slaveOK wire protocol flag
    * and MUST NOT use $readPreference
    *
    * For mode 'secondary', drivers MUST set the slaveOK wire protocol flag and
    * MUST also use $readPreference
    *
    * For mode 'primaryPreferred', drivers MUST set the slaveOK wire protocol
    * flag and MUST also use $readPreference
    *
    * For mode 'secondaryPreferred', drivers MUST set the slaveOK wire protocol
    * flag. If the read preference contains a non-empty tag_sets parameter,
    * drivers MUST use $readPreference; otherwise, drivers MUST NOT use
    * $readPreference
    *
    * For mode 'nearest', drivers MUST set the slaveOK wire protocol flag and
    * MUST also use $readPreference
    */
   if (mode == MONGOC_READ_SECONDARY_PREFERRED && bson_empty0 (tags)) {
      result->flags |= MONGOC_QUERY_SLAVE_OK;
   } else if (mode != MONGOC_READ_PRIMARY) {
      result->flags |= MONGOC_QUERY_SLAVE_OK;

      /* Server Selection Spec: "When any $ modifier is used, including the
       * $readPreference modifier, the query MUST be provided using the $query
       * modifier".
       *
       * This applies to commands, too.
       */
      result->query_with_read_prefs = bson_new ();
      result->query_owned = true;

      if (bson_has_field (query_bson, "$query")) {
         /* already of the form {$query: ..., ...}; copy it wholesale */
         bson_concat (result->query_with_read_prefs, query_bson);
      } else {
         bson_append_document (result->query_with_read_prefs,
                               "$query", 6, query_bson);
      }

      bson_append_document_begin (result->query_with_read_prefs,
                                  "$readPreference", 15, &child);
      mode_str = _get_read_mode_string (mode);
      bson_append_utf8 (&child, "mode", 4, mode_str, -1);
      if (!bson_empty0 (tags)) {
         bson_append_array (&child, "tags", 4, tags);
      }

#ifdef MONGOC_EXPERIMENTAL_FEATURES
      max_staleness_ms = mongoc_read_prefs_get_max_staleness_ms (read_prefs);
      if (max_staleness_ms > 0) {
         bson_append_int64 (&child, "maxStalenessMS", 14, max_staleness_ms);
      }
#endif

      bson_append_document_end (result->query_with_read_prefs, &child);
   }
}
mongoc_cursor_t * _mongoc_cursor_new (mongoc_client_t *client, const char *db_and_collection, mongoc_query_flags_t flags, uint32_t skip, uint32_t limit, uint32_t batch_size, bool is_command, const bson_t *query, const bson_t *fields, const mongoc_read_prefs_t *read_prefs) { mongoc_read_prefs_t *local_read_prefs = NULL; mongoc_read_mode_t mode; mongoc_cursor_t *cursor; const bson_t *tags; bson_iter_t iter; const char *key; const char *mode_str; bson_t child; bool found = false; int i; ENTRY; BSON_ASSERT (client); BSON_ASSERT (db_and_collection); BSON_ASSERT (query); if (!read_prefs) { read_prefs = client->read_prefs; } cursor = bson_malloc0 (sizeof *cursor); /* * DRIVERS-63: * * If this is a command and we have read_prefs other than PRIMARY, we need to * set SlaveOK bit in the protocol. */ if (is_command && read_prefs && (mongoc_read_prefs_get_mode (read_prefs) != MONGOC_READ_PRIMARY)) { flags |= MONGOC_QUERY_SLAVE_OK; } /* * CDRIVER-244: * * If this is a command, we need to verify we can send it to the location * specified by the read preferences. Otherwise, log a warning that we * are rerouting to the primary instance. */ if (is_command && read_prefs && (mongoc_read_prefs_get_mode (read_prefs) != MONGOC_READ_PRIMARY) && bson_iter_init (&iter, query) && bson_iter_next (&iter) && (key = bson_iter_key (&iter))) { for (i = 0; gSecondaryOkCommands [i]; i++) { if (0 == strcasecmp (key, gSecondaryOkCommands [i])) { found = true; break; } } if (!found) { cursor->redir_primary = true; local_read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY); read_prefs = local_read_prefs; MONGOC_INFO ("Database command \"%s\" rerouted to primary node", key); } } /* * Cursors execute their query lazily. This sadly means that we must copy * some extra data around between the bson_t structures. This should be * small in most cases, so it reduces to a pure memcpy. The benefit to this * design is simplified error handling by API consumers. 
*/ cursor->client = client; bson_strncpy (cursor->ns, db_and_collection, sizeof cursor->ns); cursor->nslen = (uint32_t)strlen(cursor->ns); cursor->flags = flags; cursor->skip = skip; cursor->limit = limit; cursor->batch_size = batch_size; cursor->is_command = is_command; cursor->has_fields = !!fields; #define MARK_FAILED(c) \ do { \ bson_init (&(c)->query); \ bson_init (&(c)->fields); \ (c)->failed = true; \ (c)->done = true; \ (c)->end_of_event = true; \ (c)->sent = true; \ } while (0) /* we can't have exhaust queries with limits */ if ((flags & MONGOC_QUERY_EXHAUST) && limit) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot specify MONGOC_QUERY_EXHAUST and set a limit."); MARK_FAILED (cursor); GOTO (finish); } /* we can't have exhaust queries with sharded clusters */ if ((flags & MONGOC_QUERY_EXHAUST) && (client->cluster.mode == MONGOC_CLUSTER_SHARDED_CLUSTER)) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot specify MONGOC_QUERY_EXHAUST with sharded cluster."); MARK_FAILED (cursor); GOTO (finish); } /* * Check types of various optional parameters. */ if (!is_command) { if (bson_iter_init_find (&iter, query, "$explain") && !(BSON_ITER_HOLDS_BOOL (&iter) || BSON_ITER_HOLDS_INT32 (&iter))) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "$explain must be a boolean."); MARK_FAILED (cursor); GOTO (finish); } if (bson_iter_init_find (&iter, query, "$snapshot") && !BSON_ITER_HOLDS_BOOL (&iter) && !BSON_ITER_HOLDS_INT32 (&iter)) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "$snapshot must be a boolean."); MARK_FAILED (cursor); GOTO (finish); } } /* * Check if we have a mixed top-level query and dollar keys such * as $orderby. This is not allowed (you must use {$query:{}}. 
*/ if (bson_iter_init (&iter, query)) { bool found_dollar = false; bool found_non_dollar = false; while (bson_iter_next (&iter)) { if (bson_iter_key (&iter)[0] == '$') { found_dollar = true; } else { found_non_dollar = true; } } if (found_dollar && found_non_dollar) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot mix top-level query with dollar keys such " "as $orderby. Use {$query: {},...} instead."); MARK_FAILED (cursor); GOTO (finish); } } if (!cursor->is_command && !bson_has_field (query, "$query")) { bson_init (&cursor->query); bson_append_document (&cursor->query, "$query", 6, query); } else { bson_copy_to (query, &cursor->query); } if (read_prefs) { cursor->read_prefs = mongoc_read_prefs_copy (read_prefs); mode = mongoc_read_prefs_get_mode (read_prefs); tags = mongoc_read_prefs_get_tags (read_prefs); if (mode != MONGOC_READ_PRIMARY) { flags |= MONGOC_QUERY_SLAVE_OK; if ((mode != MONGOC_READ_SECONDARY_PREFERRED) || tags) { bson_append_document_begin (&cursor->query, "$readPreference", 15, &child); mode_str = _mongoc_cursor_get_read_mode_string (mode); bson_append_utf8 (&child, "mode", 4, mode_str, -1); if (tags) { bson_append_array (&child, "tags", 4, tags); } bson_append_document_end (&cursor->query, &child); } } } if (fields) { bson_copy_to(fields, &cursor->fields); } else { bson_init(&cursor->fields); } _mongoc_buffer_init(&cursor->buffer, NULL, 0, NULL, NULL); finish: mongoc_counter_cursors_active_inc(); if (local_read_prefs) { mongoc_read_prefs_destroy (local_read_prefs); } RETURN (cursor); }
/**
 * mongoc_gridfs_file_save:
 *
 * Persist a dirty gridfs file: flush any dirty page, then upsert the
 * metadata document (length, chunkSize, uploadDate, and the optional md5,
 * filename, contentType, aliases, metadata fields) into the files
 * collection, keyed by files_id.
 *
 * Returns true on success; on failure sets file->failed and file->error.
 * A clean (non-dirty) file is a successful no-op.
 *
 * NOTE(review): is_dirty is cleared even when the update fails — presumably
 * deliberate so a failed save is not retried implicitly; confirm.
 */
bool
mongoc_gridfs_file_save (mongoc_gridfs_file_t *file)
{
   bson_t *selector, *update, child;
   const char *md5;
   const char *filename;
   const char *content_type;
   const bson_t *aliases;
   const bson_t *metadata;
   bool r;

   ENTRY;

   if (!file->is_dirty) {
      /* FIX: pair ENTRY with the RETURN trace macro (was a bare
       * "return 1;"), and return a proper bool. */
      RETURN (true);
   }

   if (file->page && _mongoc_gridfs_file_page_is_dirty (file->page)) {
      _mongoc_gridfs_file_flush_page (file);
   }

   md5 = mongoc_gridfs_file_get_md5 (file);
   filename = mongoc_gridfs_file_get_filename (file);
   content_type = mongoc_gridfs_file_get_content_type (file);
   aliases = mongoc_gridfs_file_get_aliases (file);
   metadata = mongoc_gridfs_file_get_metadata (file);

   selector = bson_new ();
   bson_append_value (selector, "_id", -1, &file->files_id);

   update = bson_new ();
   bson_append_document_begin (update, "$set", -1, &child);
   bson_append_int64 (&child, "length", -1, file->length);
   bson_append_int32 (&child, "chunkSize", -1, file->chunk_size);
   bson_append_date_time (&child, "uploadDate", -1, file->upload_date);

   /* optional metadata: only written when present on the file handle */
   if (md5) {
      bson_append_utf8 (&child, "md5", -1, md5, -1);
   }
   if (filename) {
      bson_append_utf8 (&child, "filename", -1, filename, -1);
   }
   if (content_type) {
      bson_append_utf8 (&child, "contentType", -1, content_type, -1);
   }
   if (aliases) {
      bson_append_array (&child, "aliases", -1, aliases);
   }
   if (metadata) {
      bson_append_document (&child, "metadata", -1, metadata);
   }

   bson_append_document_end (update, &child);

   r = mongoc_collection_update (file->gridfs->files, MONGOC_UPDATE_UPSERT,
                                 selector, update, NULL, &file->error);
   file->failed = !r;

   bson_destroy (selector);
   bson_destroy (update);

   file->is_dirty = 0;

   RETURN (r);
}
/* Integration test: run a one-stage {$match: {hello: "world"}} aggregation
 * against a live server and verify exactly one matching document is
 * returned. Requires a server reachable at gTestUri; aggregation cursors
 * require MongoDB 2.5.x+ (older servers make the first next() fail). */
static void
test_aggregate (void)
{
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bson_error_t error;
   bool r;
   bson_t b;
   bson_t match;
   bson_t pipeline;
   bson_iter_t iter;

   /* build pipeline = [{$match: {hello: "world"}}]; b doubles as the
    * document inserted below so the match must succeed */
   bson_init(&b);
   bson_append_utf8(&b, "hello", -1, "world", -1);
   bson_init(&match);
   bson_append_document(&match, "$match", -1, &b);
   bson_init(&pipeline);
   bson_append_document(&pipeline, "0", -1, &match);

   client = mongoc_client_new(gTestUri);
   ASSERT (client);

   collection = mongoc_client_get_collection(client, "test", "test");
   ASSERT (collection);

   /* best-effort drop; collection may not exist yet */
   mongoc_collection_drop(collection, &error);

   r = mongoc_collection_insert(collection, MONGOC_INSERT_NONE, &b, NULL,
                                &error);
   ASSERT (r);

   cursor = mongoc_collection_aggregate(collection, MONGOC_QUERY_NONE,
                                        &pipeline, NULL);
   ASSERT (cursor);

   /*
    * This can fail if we are connecting to a pre-2.5.x MongoDB instance.
    */
   r = mongoc_cursor_next(cursor, &doc);
   if (mongoc_cursor_error(cursor, &error)) {
      MONGOC_WARNING("%s", error.message);
   }

   /* exactly one result, carrying the "hello" field we inserted */
   ASSERT (r);
   ASSERT (doc);

   ASSERT (bson_iter_init_find (&iter, doc, "hello") &&
           BSON_ITER_HOLDS_UTF8 (&iter));

   /* cursor must now be exhausted */
   r = mongoc_cursor_next(cursor, &doc);
   if (mongoc_cursor_error(cursor, &error)) {
      MONGOC_WARNING("%s", error.message);
   }

   ASSERT (!r);
   ASSERT (!doc);

   mongoc_cursor_destroy(cursor);
   mongoc_collection_destroy(collection);
   mongoc_client_destroy(client);
   bson_destroy(&b);
   bson_destroy(&pipeline);
   bson_destroy(&match);
}
/* Integration test: run a one-stage {$match: {hello: "world"}} aggregation
 * twice -- once with no options and once with {batchSize: 10,
 * allowDiskUse: true} -- and verify exactly one matching document each
 * time. Skips gracefully (break) when the server predates the aggregate
 * command (MongoDB 2.0). */
static void
test_aggregate (void)
{
   mongoc_collection_t *collection;
   mongoc_database_t *database;
   mongoc_client_t *client;
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bson_error_t error;
   bool r;
   bson_t b;
   bson_t opts;
   bson_t match;
   bson_t pipeline;
   bson_iter_t iter;
   int i;

   /* build pipeline = [{$match: {hello: "world"}}]; b doubles as the
    * document inserted below so the match must succeed */
   bson_init(&b);
   bson_append_utf8(&b, "hello", -1, "world", -1);
   bson_init(&match);
   bson_append_document(&match, "$match", -1, &b);
   bson_init(&pipeline);
   bson_append_document(&pipeline, "0", -1, &match);

   client = mongoc_client_new(gTestUri);
   ASSERT (client);

   database = get_test_database (client);
   ASSERT (database);

   collection = get_test_collection (client, "test_aggregate");
   ASSERT (collection);

   /* best-effort drop; collection may not exist yet */
   mongoc_collection_drop(collection, &error);

   r = mongoc_collection_insert(collection, MONGOC_INSERT_NONE, &b, NULL,
                                &error);
   ASSERT (r);

   /* iteration 0: default options; iteration 1: explicit opts document */
   for (i = 0; i < 2; i++) {
      if (i % 2 == 0) {
         cursor = mongoc_collection_aggregate(collection, MONGOC_QUERY_NONE,
                                              &pipeline, NULL, NULL);
         ASSERT (cursor);
      } else {
         bson_init (&opts);
         BSON_APPEND_INT32 (&opts, "batchSize", 10);
         BSON_APPEND_BOOL (&opts, "allowDiskUse", true);

         cursor = mongoc_collection_aggregate(collection, MONGOC_QUERY_NONE,
                                              &pipeline, &opts, NULL);
         ASSERT (cursor);
         bson_destroy (&opts);
      }

      /*
       * This can fail if we are connecting to a 2.0 MongoDB instance.
       */
      r = mongoc_cursor_next(cursor, &doc);
      if (mongoc_cursor_error(cursor, &error)) {
         /* aggregate not supported by this server: skip the test body */
         if ((error.domain == MONGOC_ERROR_QUERY) &&
             (error.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND)) {
            mongoc_cursor_destroy (cursor);
            break;
         }
         MONGOC_WARNING("[%d.%d] %s", error.domain, error.code,
                        error.message);
      }

      /* exactly one result, carrying the "hello" field we inserted */
      ASSERT (r);
      ASSERT (doc);

      ASSERT (bson_iter_init_find (&iter, doc, "hello") &&
              BSON_ITER_HOLDS_UTF8 (&iter));

      /* cursor must now be exhausted */
      r = mongoc_cursor_next(cursor, &doc);
      if (mongoc_cursor_error(cursor, &error)) {
         MONGOC_WARNING("%s", error.message);
      }

      ASSERT (!r);
      ASSERT (!doc);

      mongoc_cursor_destroy(cursor);
   }

   r = mongoc_collection_drop(collection, &error);
   ASSERT (r);

   mongoc_collection_destroy(collection);
   mongoc_database_destroy(database);
   mongoc_client_destroy(client);
   bson_destroy(&b);
   bson_destroy(&pipeline);
   bson_destroy(&match);
}
/*
 * jsonsl stack-change callback: incrementally builds a tree of bson_t
 * documents/arrays while jsonsl parses bstate->text.
 *
 * PUSH of an object/list opens a new entry on bstate->entry[], remembering
 * the pending key (stashed in state->data by the HKEY pop). POP appends the
 * completed value to its parent: sub-documents/arrays via
 * bson_append_document/bson_append_array, scalars decoded from the raw text
 * span [pos_begin, pos_cur).
 *
 * NOTE(review): bstrings created here (read_string results, bformat'd array
 * index keys) are handed to bson_append_* (which copies) but never
 * bdestroy'd in this function -- presumably freed by the caller that owns
 * bstate/parser state; confirm, else this leaks per token.
 */
static void
on_stack_change(jsonsl_t parser, jsonsl_action_t action,
                struct jsonsl_state_st *state, const jsonsl_char_t *at)
{
    struct bson_state *bstate = (struct bson_state *) parser->data;

    if (action == JSONSL_ACTION_PUSH) {
        if (state->type == JSONSL_T_OBJECT || state->type == JSONSL_T_LIST) {
            if (bstate->cur_entry > -1) {
                /* nested container: its key was parsed just before the push */
                bstate->cur_entry++;
                bstate->entry[bstate->cur_entry].key = (bstring) state->data;
            } else {
                /* root container has no key */
                bstate->cur_entry = 0;
                bstate->entry[bstate->cur_entry].key = NULL;
            }
            bstate->entry[bstate->cur_entry].bson = bson_new();
            bstate->entry[bstate->cur_entry].array_index = 0;
        }
    } else if (action == JSONSL_ACTION_POP) {
        if (state->type == JSONSL_T_HKEY) {
            /* stash the decoded key for the value that follows */
            state->data = read_string(bstate->text + state->pos_begin,
                                      state->pos_cur - state->pos_begin);
        } else if (state->type == JSONSL_T_OBJECT ||
                   state->type == JSONSL_T_LIST) {
            if (bstate->cur_entry > 0) {
                /* attach the finished container to its parent */
                struct bson_entry *entry = &(bstate->entry[bstate->cur_entry]);
                struct bson_entry *parent =
                    &(bstate->entry[bstate->cur_entry - 1]);
                if (state->type == JSONSL_T_OBJECT) {
                    bson_append_document(parent->bson, bdata(entry->key),
                                         blength(entry->key), entry->bson);
                } else {
                    bson_append_array(parent->bson, bdata(entry->key),
                                      blength(entry->key), entry->bson);
                }
            }
            bstate->cur_entry--;
        } else {
            /* scalar value: key is either the stashed HKEY or, inside an
             * array, the synthesized decimal index */
            bstring key = (bstring) state->data;
            if (key == NULL) {
                key = bformat("%d",
                              bstate->entry[bstate->cur_entry].array_index);
                bstate->entry[bstate->cur_entry].array_index++;
            }
            if (state->type == JSONSL_T_SPECIAL) {
                if (state->special_flags & JSONSL_SPECIALf_BOOLEAN) {
                    bson_append_bool(bstate->entry[bstate->cur_entry].bson,
                                     bdata(key), blength(key),
                                     state->special_flags &
                                        JSONSL_SPECIALf_TRUE);
                } else if (state->special_flags & JSONSL_SPECIALf_NULL) {
                    bson_append_null(bstate->entry[bstate->cur_entry].bson,
                                     bdata(key), blength(key));
                } else if (state->special_flags & JSONSL_SPECIALf_NUMERIC) {
                    /* copy the token into a NUL-terminated scratch buffer */
                    size_t length = state->pos_cur - state->pos_begin;
                    char num[length + 1];
                    memcpy(num, bstate->text + state->pos_begin, length);
                    num[length] = '\0';
                    if (state->special_flags & JSONSL_SPECIALf_NUMNOINT) {
                        bson_append_double(
                            bstate->entry[bstate->cur_entry].bson,
                            bdata(key), blength(key), atof(num));
                    } else {
                        /* FIX: atoi truncates to int, corrupting JSON
                         * integers beyond 32 bits; atoll covers the full
                         * int64 range. jsonsl already validated the token
                         * as numeric, so no error reporting is needed. */
                        bson_append_int64(
                            bstate->entry[bstate->cur_entry].bson,
                            bdata(key), blength(key), atoll(num));
                    }
                }
            } else {
                bstring value = read_string(bstate->text + state->pos_begin,
                                            state->pos_cur - state->pos_begin);
                bson_append_utf8(bstate->entry[bstate->cur_entry].bson,
                                 bdata(key), blength(key),
                                 bdata(value), blength(value));
            }
        }
    }
}
/* Append sub-document @c to @b under @key (key length taken from strlen).
 * Returns false on NULL input or when the append fails (e.g. would exceed
 * the maximum document size). */
bool
BsonAppendBson(BSON *b, char *key, BSON *c)
{
    /* FIX: guard NULL inputs (strlen(NULL) is undefined behavior) and make
     * the size_t -> int narrowing for the key-length parameter explicit. */
    if (!b || !key || !c) {
        return false;
    }
    return bson_append_document(b, key, (int) strlen(key), c);
}
/* Golden-file test for the bson_append_* family: each sequence of appends
 * must serialize byte-for-byte to the corresponding testN.bson fixture.
 * The append order and the exact keys/values are the test's substance --
 * do not reorder. */
static void
test_bson_append_general (void)
{
   /* last 3 bytes give the fixture ObjectId its distinctive tail */
   bson_uint8_t bytes[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0x23, 0x45 };
   bson_oid_t oid;
   bson_t *bson;
   bson_t *array;
   bson_t *subdoc;

   /* test1: single int32 */
   bson = bson_new();
   assert(bson_append_int32(bson, "int", -1, 1));
   assert_bson_equal_file(bson, "test1.bson");
   bson_destroy(bson);

   /* test2: single int64 */
   bson = bson_new();
   assert(bson_append_int64(bson, "int64", -1, 1));
   assert_bson_equal_file(bson, "test2.bson");
   bson_destroy(bson);

   /* test3: single double */
   bson = bson_new();
   assert(bson_append_double(bson, "double", -1, 1.123));
   assert_bson_equal_file(bson, "test3.bson");
   bson_destroy(bson);

   /* test5: single UTF-8 string (no test4 fixture in this sequence) */
   bson = bson_new();
   assert(bson_append_utf8(bson, "string", -1, "some string", -1));
   assert_bson_equal_file(bson, "test5.bson");
   bson_destroy(bson);

   /* test6: array of six int32s -- array elements use string indices */
   bson = bson_new();
   array = bson_new();
   assert(bson_append_int32(array, "0", -1, 1));
   assert(bson_append_int32(array, "1", -1, 2));
   assert(bson_append_int32(array, "2", -1, 3));
   assert(bson_append_int32(array, "3", -1, 4));
   assert(bson_append_int32(array, "4", -1, 5));
   assert(bson_append_int32(array, "5", -1, 6));
   assert(bson_append_array(bson, "array[int]", -1, array));
   assert_bson_equal_file(bson, "test6.bson");
   bson_destroy(array);
   bson_destroy(bson);

   /* test7: array of doubles */
   bson = bson_new();
   array = bson_new();
   assert(bson_append_double(array, "0", -1, 1.123));
   assert(bson_append_double(array, "1", -1, 2.123));
   assert(bson_append_array(bson, "array[double]", -1, array));
   assert_bson_equal_file(bson, "test7.bson");
   bson_destroy(array);
   bson_destroy(bson);

   /* test8: embedded sub-document */
   bson = bson_new();
   subdoc = bson_new();
   assert(bson_append_int32(subdoc, "int", -1, 1));
   assert(bson_append_document(bson, "document", -1, subdoc));
   assert_bson_equal_file(bson, "test8.bson");
   bson_destroy(subdoc);
   bson_destroy(bson);

   /* test9: null value */
   bson = bson_new();
   assert(bson_append_null(bson, "null", -1));
   assert_bson_equal_file(bson, "test9.bson");
   bson_destroy(bson);

   /* test10: regex with options */
   bson = bson_new();
   assert(bson_append_regex(bson, "regex", -1, "1234", "i"));
   assert_bson_equal_file(bson, "test10.bson");
   bson_destroy(bson);

   /* test11: classic {hello: "world"} */
   bson = bson_new();
   assert(bson_append_utf8(bson, "hello", -1, "world", -1));
   assert_bson_equal_file(bson, "test11.bson");
   bson_destroy(bson);

   /* test12: heterogeneous array (utf8, double, int32) */
   bson = bson_new();
   array = bson_new();
   assert(bson_append_utf8(array, "0", -1, "awesome", -1));
   assert(bson_append_double(array, "1", -1, 5.05));
   assert(bson_append_int32(array, "2", -1, 1986));
   assert(bson_append_array(bson, "BSON", -1, array));
   assert_bson_equal_file(bson, "test12.bson");
   bson_destroy(bson);
   bson_destroy(array);

   /* test17: composite document with oid, nested doc, arrays.
    * NOTE: "array" is reused below for both arrays and documents. */
   bson = bson_new();
   memcpy(&oid, bytes, sizeof oid);
   assert(bson_append_oid(bson, "_id", -1, &oid));

   subdoc = bson_new();
   assert(bson_append_oid(subdoc, "_id", -1, &oid));
   array = bson_new();
   assert(bson_append_utf8(array, "0", -1, "1", -1));
   assert(bson_append_utf8(array, "1", -1, "2", -1));
   assert(bson_append_utf8(array, "2", -1, "3", -1));
   assert(bson_append_utf8(array, "3", -1, "4", -1));
   assert(bson_append_array(subdoc, "tags", -1, array));
   bson_destroy(array);
   assert(bson_append_utf8(subdoc, "text", -1, "asdfanother", -1));
   array = bson_new();
   assert(bson_append_utf8(array, "name", -1, "blah", -1));
   assert(bson_append_document(subdoc, "source", -1, array));
   bson_destroy(array);
   assert(bson_append_document(bson, "document", -1, subdoc));
   bson_destroy(subdoc);

   array = bson_new();
   assert(bson_append_utf8(array, "0", -1, "source", -1));
   assert(bson_append_array(bson, "type", -1, array));
   bson_destroy(array);

   array = bson_new();
   assert(bson_append_utf8(array, "0", -1, "server_created_at", -1));
   assert(bson_append_array(bson, "missing", -1, array));
   bson_destroy(array);

   assert_bson_equal_file(bson, "test17.bson");
   bson_destroy(bson);
}