int main(void) { const uint8_t *json = (uint8_t*)"{ \"message\" : \"Hello World!\" }"; bson_error_t error; bson_t *doc; char *str; size_t len; if (!(doc = bson_new_from_json(json, -1, &error))) { fprintf(stderr, "Cannot initialize BSON structure from JSON example.\n"); fprintf(stderr, "BSON error message: %s\n", error.message); return 1; } printf("Has key \"message\" (expect \"yes\") : %s\n", yesno(bson_has_field(doc, "message"))); printf("Has key \"triceratops\" (expect\"no\") : %s\n", yesno(bson_has_field(doc, "triceratops"))); str = bson_as_json(doc, &len); printf("Original JSON document : %s\n", json); printf("BSON-to-JSON conversion : %s\n", str); bson_free(str); bson_destroy(doc); return 0; }
/*
 * Callback handed to mongoc_client_session_with_transaction() by the JSON
 * test runner.  @ctx is a cb_ctx_t carrying the test context plus the
 * "callback" subdocument from the test file.
 *
 * If the callback document contains a single "operation" field, that
 * operation is run once; otherwise it must contain an "operations" array
 * whose elements are run in order until one fails.  The reply of the last
 * operation run is copied into *reply (caller owns and must free it).
 * Returns the result of the last operation run.
 */
static bool
with_transaction_callback_runner (mongoc_client_session_t *session,
                                  void *ctx,
                                  bson_t **reply,
                                  bson_error_t *error)
{
   cb_ctx_t *cb_ctx = (cb_ctx_t *) ctx;
   bson_t local_reply;
   bson_t operation;
   bson_t operations;
   bson_t *test;
   bson_iter_t iter;
   bool res = false;

   test = &(cb_ctx->callback);

   if (bson_has_field (test, "operation")) {
      /* single-operation form.  NOTE(review): assumes json_test_operation
       * initializes local_reply on every path — confirm in its definition */
      bson_lookup_doc (test, "operation", &operation);
      res = json_test_operation (cb_ctx->ctx,
                                 test,
                                 &operation,
                                 cb_ctx->ctx->collection,
                                 session,
                                 &local_reply);
   } else {
      ASSERT (bson_has_field (test, "operations"));
      bson_lookup_doc (test, "operations", &operations);
      BSON_ASSERT (bson_iter_init (&iter, &operations));

      /* init so local_reply is valid even when the array is empty; each
       * iteration destroys the previous reply before overwriting it */
      bson_init (&local_reply);

      while (bson_iter_next (&iter)) {
         bson_destroy (&local_reply);
         bson_iter_bson (&iter, &operation);
         res = json_test_operation (cb_ctx->ctx,
                                    test,
                                    &operation,
                                    cb_ctx->ctx->collection,
                                    session,
                                    &local_reply);
         if (!res) {
            /* stop on first failing operation */
            break;
         }
      }
   }

   /* hand the final reply back to with_transaction(); caller frees *reply */
   *reply = bson_copy (&local_reply);
   bson_destroy (&local_reply);

   return res;
}
/*
 * Run one operation from a withTransaction JSON test.
 *
 * If the operation's arguments contain a "callback" document, the nested
 * operations are executed via mongoc_client_session_with_transaction()
 * using the session named by the operation's "object" field and any
 * transaction options found under arguments.options.  Otherwise the
 * operation is executed directly, with an optional session looked up from
 * arguments.session.  Returns the operation's success/failure result.
 */
static bool
with_transaction_test_run_operation (json_test_ctx_t *ctx,
                                     const bson_t *test,
                                     const bson_t *operation)
{
   mongoc_transaction_opt_t *opts = NULL;
   mongoc_client_session_t *session = NULL;
   bson_error_t error;
   bson_t args;
   bson_t reply;
   bool res;
   cb_ctx_t cb_ctx;

   bson_lookup_doc (operation, "arguments", &args);

   /* If there is a 'callback' field, run the nested operations
      through mongoc_client_session_with_transaction(). */
   if (bson_has_field (&args, "callback")) {
      /* with_transaction() does not produce a reply doc; init so the
       * unconditional bson_destroy (&reply) below is valid */
      bson_init (&reply);

      ASSERT (bson_has_field (operation, "object"));
      session = session_from_name (ctx, bson_lookup_utf8 (operation, "object"));
      ASSERT (session);

      bson_lookup_doc (&args, "callback", &cb_ctx.callback);
      cb_ctx.ctx = ctx;

      if (bson_has_field (&args, "options")) {
         opts = bson_lookup_txn_opts (&args, "options");
      }

      res = mongoc_client_session_with_transaction (
         session, with_transaction_callback_runner, opts, &cb_ctx, &error);
   } else {
      /* If there is no 'callback' field, then run simply.
       * NOTE(review): assumes json_test_operation initializes reply on
       * every path, since it is destroyed unconditionally below — confirm */
      if (bson_has_field (&args, "session")) {
         session = session_from_name (ctx, bson_lookup_utf8 (&args, "session"));
      }

      res = json_test_operation (
         ctx, test, operation, ctx->collection, session, &reply);
   }

   bson_destroy (&args);
   bson_destroy (&reply);
   /* NULL-safe when no options were parsed */
   mongoc_transaction_opts_destroy (opts);

   return res;
}
/*
 * mruby binding: Collection#insert(hash) -> hash
 *
 * Converts the given mruby Hash to a BSON document, generates an "_id"
 * ObjectId when the caller did not supply one, inserts the document, and
 * returns the inserted document (including "_id") as a new mruby Hash.
 *
 * Raises RuntimeError with the driver's error message if the insert fails.
 */
mrb_value mrb_mongo_collection_insert(mrb_state *mrb, mrb_value self) {
  mrb_mongo_collection_data *data = DATA_PTR(self);
  mrb_value record_hash, inserted_hash;
  bson_t *doc;
  bson_oid_t oid;
  bson_error_t error;

  mrb_get_args(mrb, "H", &record_hash);
  doc = bson_new();
  mrb_hash_to_bson(mrb, record_hash, doc);

  //add id if not supplied
  if (!bson_has_field(doc, "_id")) {
    bson_oid_init(&oid, NULL);
    bson_append_oid(doc, "_id", -1, &oid);
  }

  if (!mongoc_collection_insert(data->collection, MONGOC_INSERT_NONE, doc, NULL, &error)) {
    /* free doc before raising: mrb_raise longjmps and never returns */
    bson_destroy(doc);
    mrb_raise(mrb, E_RUNTIME_ERROR, error.message);
  }

  /* convert the (possibly _id-augmented) document back into a Ruby hash */
  inserted_hash = mrb_hash_new(mrb);
  bson_to_mrb_hash(mrb, doc, inserted_hash);
  bson_destroy(doc);
  return inserted_hash;
}
/*
 * Initialize a command-started monitoring (APM) event in place.
 *
 * event->command either borrows @command (command_owned == false) or, when
 * a $readPreference-wrapped query must be unwrapped, owns a fresh copy of
 * the $query subdocument (command_owned == true).  All other fields are
 * shallow-copied pointers/values owned by the caller.
 */
void
mongoc_apm_command_started_init (mongoc_apm_command_started_t *event,
                                 const bson_t *command,
                                 const char *database_name,
                                 const char *command_name,
                                 int64_t request_id,
                                 int64_t operation_id,
                                 const mongoc_host_list_t *host,
                                 uint32_t server_id,
                                 void *context)
{
   bson_iter_t iter;
   uint32_t len;
   const uint8_t *data;

   /* Command Monitoring Spec:
    *
    * In cases where queries or commands are embedded in a $query parameter
    * when a read preference is provided, they MUST be unwrapped and the value
    * of the $query attribute becomes the filter or the command in the started
    * event. The read preference will subsequently be dropped as it is
    * considered metadata and metadata is not currently provided in the command
    * events.
    */
   if (bson_has_field (command, "$readPreference")) {
      if (bson_iter_init_find (&iter, command, "$query") &&
          BSON_ITER_HOLDS_DOCUMENT (&iter)) {
         /* unwrap: event owns a copy of just the $query subdocument */
         bson_iter_document (&iter, &len, &data);
         event->command = bson_new_from_data (data, len);
         event->command_owned = true;
      } else {
         /* Got $readPreference without $query, probably OP_MSG */
         event->command = (bson_t *) command;
         event->command_owned = false;
      }
   } else {
      /* discard "const", we promise not to modify "command" */
      event->command = (bson_t *) command;
      event->command_owned = false;
   }

   event->database_name = database_name;
   event->command_name = command_name;
   event->request_id = request_id;
   event->operation_id = operation_id;
   event->host = host;
   event->server_id = server_id;
   event->context = context;
}
/*
 * Execute one operation from a transactions JSON test, selecting the
 * client session named by "arguments.session" (if any) and muting the
 * warnings that specific abortTransaction tests are known to emit.
 */
static void
transactions_test_run_operation (json_test_ctx_t *ctx,
                                 const bson_t *test,
                                 const bson_t *operation,
                                 mongoc_collection_t *collection)
{
   mongoc_client_session_t *session = NULL;
   const char *description;

   if (bson_has_field (operation, "arguments.session")) {
      const char *session_name =
         bson_lookup_utf8 (operation, "arguments.session");

      if (strcmp (session_name, "session0") == 0) {
         session = ctx->sessions[0];
      } else if (strcmp (session_name, "session1") == 0) {
         session = ctx->sessions[1];
      } else {
         MONGOC_ERROR ("Unrecognized session name: %s", session_name);
         abort ();
      }
   }

   description = bson_lookup_utf8 (test, "description");

   /* we log warnings from abortTransaction. suppress warnings that we expect,
    * but don't suppress all: we want to know if any other tests log warnings */
   if (strcmp (description, "write conflict abort") == 0 ||
       strcmp (description, "abort ignores TransactionAborted") == 0 ||
       strcmp (description, "abort does not apply writeConcern") == 0) {
      capture_logs (true);
   }

   /* json_test_operations() chose session0 or session1 from the
    * arguments.session field in the JSON test */
   json_test_operation (test, operation, collection, session);

   capture_logs (false);
}
mongoc_cursor_t * _mongoc_cursor_new (mongoc_client_t *client, const char *db_and_collection, mongoc_query_flags_t flags, bson_uint32_t skip, bson_uint32_t limit, bson_uint32_t batch_size, bson_bool_t is_command, const bson_t *query, const bson_t *fields, const mongoc_read_prefs_t *read_prefs) { mongoc_read_mode_t mode; mongoc_cursor_t *cursor; const bson_t *tags; const char *mode_str; bson_t child; ENTRY; BSON_ASSERT(client); BSON_ASSERT(db_and_collection); BSON_ASSERT(query); /* we can't have exhaust queries with limits */ BSON_ASSERT (!((flags & MONGOC_QUERY_EXHAUST) && limit)); /* we can't have exhaust queries with sharded clusters */ BSON_ASSERT (!((flags & MONGOC_QUERY_EXHAUST) && client->cluster.isdbgrid)); /* * Cursors execute their query lazily. This sadly means that we must copy * some extra data around between the bson_t structures. This should be * small in most cases, so it reduces to a pure memcpy. The benefit to this * design is simplified error handling by API consumers. 
*/ cursor = bson_malloc0(sizeof *cursor); cursor->client = client; strncpy(cursor->ns, db_and_collection, sizeof cursor->ns - 1); cursor->nslen = strlen(cursor->ns); cursor->flags = flags; cursor->skip = skip; cursor->limit = limit; cursor->batch_size = cursor->batch_size; cursor->is_command = is_command; if (!bson_has_field (query, "$query")) { bson_init (&cursor->query); bson_append_document (&cursor->query, "$query", 6, query); } else { bson_copy_to (query, &cursor->query); } if (read_prefs) { cursor->read_prefs = mongoc_read_prefs_copy (read_prefs); mode = mongoc_read_prefs_get_mode (read_prefs); tags = mongoc_read_prefs_get_tags (read_prefs); if (mode != MONGOC_READ_PRIMARY) { flags |= MONGOC_QUERY_SLAVE_OK; if ((mode != MONGOC_READ_SECONDARY_PREFERRED) || tags) { bson_append_document_begin (&cursor->query, "$readPreference", 15, &child); mode_str = _mongoc_cursor_get_read_mode_string (mode); bson_append_utf8 (&child, "mode", 4, mode_str, -1); if (tags) { bson_append_array (&child, "tags", 4, tags); } bson_append_document_end (&cursor->query, &child); } } } if (fields) { bson_copy_to(fields, &cursor->fields); } else { bson_init(&cursor->fields); } _mongoc_buffer_init(&cursor->buffer, NULL, 0, NULL); mongoc_counter_cursors_active_inc(); RETURN(cursor); }
/* Update result with the read prefs, following Server Selection Spec.
 * The driver must have discovered the server is a mongos.
 *
 * Depending on the read mode and tags, this either only sets the slaveOK
 * wire-protocol flag on result->flags, or additionally rewrites the query
 * into a { $query: ..., $readPreference: ... } envelope.  In the latter
 * case result->query_with_read_prefs is newly allocated and
 * result->query_owned is set so the caller knows to free it.
 *
 * NOTE(review): mongoc_read_prefs_get_mode is called before the NULL check
 * on read_prefs — assumes that getter tolerates NULL (returning PRIMARY);
 * confirm against its definition. */
static void
_apply_read_preferences_mongos (const mongoc_read_prefs_t *read_prefs,
                                const bson_t *query_bson,
                                mongoc_apply_read_prefs_result_t *result /* OUT */)
{
   mongoc_read_mode_t mode;
   const bson_t *tags = NULL;
   bson_t child;
   const char *mode_str;

   mode = mongoc_read_prefs_get_mode (read_prefs);
   if (read_prefs) {
      tags = mongoc_read_prefs_get_tags (read_prefs);
   }

   /* Server Selection Spec says:
    *
    * For mode 'primary', drivers MUST NOT set the slaveOK wire protocol flag
    * and MUST NOT use $readPreference
    *
    * For mode 'secondary', drivers MUST set the slaveOK wire protocol flag and
    * MUST also use $readPreference
    *
    * For mode 'primaryPreferred', drivers MUST set the slaveOK wire protocol
    * flag and MUST also use $readPreference
    *
    * For mode 'secondaryPreferred', drivers MUST set the slaveOK wire protocol
    * flag. If the read preference contains a non-empty tag_sets parameter,
    * drivers MUST use $readPreference; otherwise, drivers MUST NOT use
    * $readPreference
    *
    * For mode 'nearest', drivers MUST set the slaveOK wire protocol flag and
    * MUST also use $readPreference
    */
   if (mode == MONGOC_READ_SECONDARY_PREFERRED && bson_empty0 (tags)) {
      /* flag only, no $readPreference document */
      result->flags |= MONGOC_QUERY_SLAVE_OK;
   } else if (mode != MONGOC_READ_PRIMARY) {
      result->flags |= MONGOC_QUERY_SLAVE_OK;

      /* Server Selection Spec: "When any $ modifier is used, including the
       * $readPreference modifier, the query MUST be provided using the $query
       * modifier".
       *
       * This applies to commands, too.
       */
      result->query_with_read_prefs = bson_new ();
      result->query_owned = true;

      if (bson_has_field (query_bson, "$query")) {
         /* already enveloped; keep the caller's modifiers intact */
         bson_concat (result->query_with_read_prefs, query_bson);
      } else {
         bson_append_document (result->query_with_read_prefs,
                               "$query", 6, query_bson);
      }

      bson_append_document_begin (result->query_with_read_prefs,
                                  "$readPreference", 15, &child);
      mode_str = _get_read_mode_string (mode);
      bson_append_utf8 (&child, "mode", 4, mode_str, -1);
      if (!bson_empty0 (tags)) {
         bson_append_array (&child, "tags", 4, tags);
      }
      bson_append_document_end (result->query_with_read_prefs, &child);
   }
}
/*
 * Create a cursor that will lazily execute @query against
 * @db_and_collection.  Unlike older revisions, invalid parameter
 * combinations do not assert: the cursor is returned in a pre-failed
 * state with cursor->error populated, so callers surface the error via
 * the normal cursor API.  Commands that cannot run on a secondary are
 * transparently rerouted to the primary (CDRIVER-244).
 */
mongoc_cursor_t *
_mongoc_cursor_new (mongoc_client_t *client,
                    const char *db_and_collection,
                    mongoc_query_flags_t flags,
                    uint32_t skip,
                    uint32_t limit,
                    uint32_t batch_size,
                    bool is_command,
                    const bson_t *query,
                    const bson_t *fields,
                    const mongoc_read_prefs_t *read_prefs)
{
   mongoc_read_prefs_t *local_read_prefs = NULL;
   mongoc_read_mode_t mode;
   mongoc_cursor_t *cursor;
   const bson_t *tags;
   bson_iter_t iter;
   const char *key;
   const char *mode_str;
   bson_t child;
   bool found = false;
   int i;

   ENTRY;

   BSON_ASSERT (client);
   BSON_ASSERT (db_and_collection);
   BSON_ASSERT (query);

   /* fall back to the client's default read preferences */
   if (!read_prefs) {
      read_prefs = client->read_prefs;
   }

   cursor = bson_malloc0 (sizeof *cursor);

   /*
    * CDRIVER-244:
    *
    * If this is a command, we need to verify we can send it to the location
    * specified by the read preferences. Otherwise, log a warning that we
    * are rerouting to the primary instance.
    */
   if (is_command &&
       read_prefs &&
       (mongoc_read_prefs_get_mode (read_prefs) != MONGOC_READ_PRIMARY) &&
       bson_iter_init (&iter, query) &&
       bson_iter_next (&iter) &&
       (key = bson_iter_key (&iter))) {
      /* the first key of a command document is the command name; check it
       * against the whitelist of commands allowed on secondaries */
      for (i = 0; gSecondaryOkCommands [i]; i++) {
         if (0 == strcasecmp (key, gSecondaryOkCommands [i])) {
            found = true;
            break;
         }
      }
      if (!found) {
         cursor->redir_primary = true;
         local_read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
         read_prefs = local_read_prefs;
         MONGOC_INFO ("Database command \"%s\" rerouted to primary node", key);
      }
   }

   /*
    * Cursors execute their query lazily. This sadly means that we must copy
    * some extra data around between the bson_t structures. This should be
    * small in most cases, so it reduces to a pure memcpy. The benefit to this
    * design is simplified error handling by API consumers.
    */

   cursor->client = client;
   bson_strncpy (cursor->ns, db_and_collection, sizeof cursor->ns);
   cursor->nslen = (uint32_t)strlen(cursor->ns);
   cursor->flags = flags;
   cursor->skip = skip;
   cursor->limit = limit;
   cursor->batch_size = batch_size;
   cursor->is_command = is_command;

/* put the cursor into a terminal failed state so the first iteration
 * reports cursor->error instead of sending anything to the server */
#define MARK_FAILED(c) \
   do { \
      (c)->failed = true; \
      (c)->done = true; \
      (c)->end_of_event = true; \
      (c)->sent = true; \
   } while (0)

   /* we can't have exhaust queries with limits */
   if ((flags & MONGOC_QUERY_EXHAUST) && limit) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_CURSOR,
                      MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                      "Cannot specify MONGOC_QUERY_EXHAUST and set a limit.");
      MARK_FAILED (cursor);
      GOTO (finish);
   }

   /* we can't have exhaust queries with sharded clusters */
   if ((flags & MONGOC_QUERY_EXHAUST) &&
       (client->cluster.mode == MONGOC_CLUSTER_SHARDED_CLUSTER)) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_CURSOR,
                      MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                      "Cannot specify MONGOC_QUERY_EXHAUST with sharded cluster.");
      MARK_FAILED (cursor);
      GOTO (finish);
   }

   /*
    * Check types of various optional parameters.
    */
   if (!is_command) {
      if (bson_iter_init_find (&iter, query, "$explain") &&
          !(BSON_ITER_HOLDS_BOOL (&iter) || BSON_ITER_HOLDS_INT32 (&iter))) {
         bson_set_error (&cursor->error,
                         MONGOC_ERROR_CURSOR,
                         MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                         "$explain must be a boolean.");
         MARK_FAILED (cursor);
         GOTO (finish);
      }

      if (bson_iter_init_find (&iter, query, "$snapshot") &&
          !BSON_ITER_HOLDS_BOOL (&iter) &&
          !BSON_ITER_HOLDS_INT32 (&iter)) {
         bson_set_error (&cursor->error,
                         MONGOC_ERROR_CURSOR,
                         MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                         "$snapshot must be a boolean.");
         MARK_FAILED (cursor);
         GOTO (finish);
      }
   }

   /* wrap plain (non-command) queries in a $query envelope so that
    * $readPreference can be appended beside them */
   if (!cursor->is_command && !bson_has_field (query, "$query")) {
      bson_init (&cursor->query);
      bson_append_document (&cursor->query, "$query", 6, query);
   } else {
      bson_copy_to (query, &cursor->query);
   }

   if (read_prefs) {
      cursor->read_prefs = mongoc_read_prefs_copy (read_prefs);

      mode = mongoc_read_prefs_get_mode (read_prefs);
      tags = mongoc_read_prefs_get_tags (read_prefs);

      if (mode != MONGOC_READ_PRIMARY) {
         flags |= MONGOC_QUERY_SLAVE_OK;

         /* secondaryPreferred without tags needs only the slaveOk flag */
         if ((mode != MONGOC_READ_SECONDARY_PREFERRED) || tags) {
            bson_append_document_begin (&cursor->query, "$readPreference",
                                        15, &child);
            mode_str = _mongoc_cursor_get_read_mode_string (mode);
            bson_append_utf8 (&child, "mode", 4, mode_str, -1);
            if (tags) {
               bson_append_array (&child, "tags", 4, tags);
            }
            bson_append_document_end (&cursor->query, &child);
         }
      }
   }

   if (fields) {
      bson_copy_to(fields, &cursor->fields);
   } else {
      bson_init(&cursor->fields);
   }

   _mongoc_buffer_init(&cursor->buffer, NULL, 0, NULL);

finish:
   mongoc_counter_cursors_active_inc();

   if (local_read_prefs) {
      mongoc_read_prefs_destroy (local_read_prefs);
   }

   RETURN (cursor);
}
/*---------------------------------------------------------------------------
 *
 * _make_cursor --
 *
 *       Construct and send the aggregate command and create the resulting
 *       cursor.  On error, stream->cursor remains NULL, otherwise it is
 *       created and must be destroyed.  Pins the aggregate and all
 *       subsequent getMores to one server (serverId) and one session
 *       (explicit, reused implicit, or newly created implicit).
 *
 * Return:
 *       False on error and sets stream->err.
 *
 *--------------------------------------------------------------------------
 */
static bool
_make_cursor (mongoc_change_stream_t *stream)
{
   mongoc_client_session_t *cs = NULL;
   bson_t command_opts;
   bson_t command; /* { aggregate: "coll", pipeline: [], ... } */
   bson_t reply;
   bson_t getmore_opts = BSON_INITIALIZER;
   bson_iter_t iter;
   mongoc_server_description_t *sd;
   uint32_t server_id;
   int32_t max_wire_version = -1;

   BSON_ASSERT (stream);
   BSON_ASSERT (!stream->cursor);

   _make_command (stream, &command);
   bson_copy_to (&(stream->opts.extra), &command_opts);

   /* select one server and pin both the aggregate and the getMores to it */
   sd = mongoc_client_select_server (
      stream->client, false /* for_writes */, stream->read_prefs, &stream->err);
   if (!sd) {
      goto cleanup;
   }

   server_id = mongoc_server_description_id (sd);
   bson_append_int32 (&command_opts, "serverId", 8, server_id);
   bson_append_int32 (&getmore_opts, "serverId", 8, server_id);
   max_wire_version = sd->max_wire_version;
   mongoc_server_description_destroy (sd);

   if (bson_iter_init_find (&iter, &command_opts, "sessionId")) {
      /* caller supplied an explicit session in the opts */
      if (!_mongoc_client_session_from_iter (
             stream->client, &iter, &cs, &stream->err)) {
         goto cleanup;
      }
   } else if (stream->implicit_session) {
      /* If an implicit session was created before, and this cursor is now
       * being recreated after resuming, then use the same session as before */
      cs = stream->implicit_session;
      if (!mongoc_client_session_append (cs, &command_opts, &stream->err)) {
         goto cleanup;
      }
   } else {
      /* Create an implicit session. This session lsid must be the same for
       * the agg command and the subsequent getMores.  Thus, this implicit
       * session is passed as if it were an explicit session to
       * collection_read_command_with_opts and cursor_new_from_reply, but it
       * is still implicit and its lifetime is owned by this change_stream_t */
      mongoc_session_opt_t *session_opts;
      session_opts = mongoc_session_opts_new ();
      mongoc_session_opts_set_causal_consistency (session_opts, false);
      /* returns NULL if sessions aren't supported. ignore errors. */
      cs = mongoc_client_start_session (stream->client, session_opts, NULL);
      stream->implicit_session = cs;
      mongoc_session_opts_destroy (session_opts);
      if (cs &&
          !mongoc_client_session_append (cs, &command_opts, &stream->err)) {
         goto cleanup;
      }
   }

   /* the getMores must carry the same lsid as the aggregate */
   if (cs &&
       !mongoc_client_session_append (cs, &getmore_opts, &stream->err)) {
      goto cleanup;
   }

   if (stream->read_concern && !bson_has_field (&command_opts, "readConcern")) {
      mongoc_read_concern_append (stream->read_concern, &command_opts);
   }

   /* even though serverId has already been set, still pass the read prefs.
    * they are necessary for OP_MSG if sending to a secondary */
   if (!mongoc_client_read_command_with_opts (stream->client,
                                              stream->db,
                                              &command,
                                              stream->read_prefs,
                                              &command_opts,
                                              &reply,
                                              &stream->err)) {
      /* preserve the server's error document for the caller */
      bson_destroy (&stream->err_doc);
      bson_copy_to (&reply, &stream->err_doc);
      bson_destroy (&reply);
      goto cleanup;
   }

   bson_append_bool (
      &getmore_opts, MONGOC_CURSOR_TAILABLE, MONGOC_CURSOR_TAILABLE_LEN, true);
   bson_append_bool (&getmore_opts,
                     MONGOC_CURSOR_AWAIT_DATA,
                     MONGOC_CURSOR_AWAIT_DATA_LEN,
                     true);

   /* maxTimeMS is only appended to getMores if these are set in cursor opts */
   if (stream->max_await_time_ms > 0) {
      bson_append_int64 (&getmore_opts,
                         MONGOC_CURSOR_MAX_AWAIT_TIME_MS,
                         MONGOC_CURSOR_MAX_AWAIT_TIME_MS_LEN,
                         stream->max_await_time_ms);
   }

   if (stream->batch_size > 0) {
      bson_append_int32 (&getmore_opts,
                         MONGOC_CURSOR_BATCH_SIZE,
                         MONGOC_CURSOR_BATCH_SIZE_LEN,
                         stream->batch_size);
   }

   /* Change streams spec: "If neither startAtOperationTime nor resumeAfter
    * are specified, and the max wire version is >= 7, and the initial
    * aggregate command does not return a resumeToken (indicating no
    * results), the ChangeStream MUST save the operationTime from the
    * initial aggregate command when it returns." */
   if (bson_empty (&stream->resume_token) &&
       _mongoc_timestamp_empty (&stream->operation_time) &&
       max_wire_version >= 7 &&
       bson_iter_init_find (&iter, &reply, "operationTime")) {
      _mongoc_timestamp_set_from_bson (&stream->operation_time, &iter);
   }

   /* steals reply. */
   stream->cursor = mongoc_cursor_new_from_command_reply_with_opts (
      stream->client, &reply, &getmore_opts);

cleanup:
   bson_destroy (&command);
   bson_destroy (&command_opts);
   bson_destroy (&getmore_opts);
   return stream->err.code == 0;
}
/*
 * Verify that metadata appended via mongoc_metadata_append() (driver
 * name/version and platform) is included in the isMaster handshake sent
 * by a pooled client, and that the os.type and platform fields are
 * populated as the handshake spec requires.
 */
static void
test_mongoc_metadata_append_success (void)
{
   mock_server_t *server;
   mongoc_uri_t *uri;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool;
   request_t *request;
   const bson_t *request_doc;
   bson_iter_t iter;
   bson_iter_t md_iter;
   bson_iter_t inner_iter;
   const char *val;
   const char *driver_name = "php driver";
   const char *driver_version = "version abc";
   const char *platform = "./configure -nottoomanyflags";
   char big_string [METADATA_MAX_SIZE];

   memset (big_string, 'a', METADATA_MAX_SIZE - 1);
   big_string [METADATA_MAX_SIZE - 1] = '\0';

   _reset_metadata ();
   /* Make sure setting the metadata works */
   ASSERT (mongoc_metadata_append (driver_name, driver_version, platform));

   server = mock_server_new ();
   mock_server_run (server);
   uri = mongoc_uri_copy (mock_server_get_uri (server));
   /* short heartbeat so the scanner runs promptly */
   mongoc_uri_set_option_as_int32 (uri, "heartbeatFrequencyMS", 500);
   pool = mongoc_client_pool_new (uri);

   /* Force topology scanner to start */
   client = mongoc_client_pool_pop (pool);

   request = mock_server_receives_ismaster (server);
   ASSERT (request);
   request_doc = request_get_doc (request, 0);
   ASSERT (request_doc);
   ASSERT (bson_has_field (request_doc, "isMaster"));
   ASSERT (bson_has_field (request_doc, METADATA_FIELD));

   ASSERT (bson_iter_init_find (&iter, request_doc, METADATA_FIELD));
   ASSERT (bson_iter_recurse (&iter, &md_iter));

   /* Make sure driver.name and driver.version and platform are all right */
   ASSERT (bson_iter_find (&md_iter, "driver"));
   ASSERT (BSON_ITER_HOLDS_DOCUMENT (&md_iter));
   ASSERT (bson_iter_recurse (&md_iter, &inner_iter));
   ASSERT (bson_iter_find (&inner_iter, "name"));
   ASSERT (BSON_ITER_HOLDS_UTF8 (&inner_iter));
   val = bson_iter_utf8 (&inner_iter, NULL);
   ASSERT (val);
   /* appended name is merged into (not replacing) the default, so substring */
   ASSERT (strstr (val, driver_name) != NULL);

   ASSERT (bson_iter_find (&inner_iter, "version"));
   ASSERT (BSON_ITER_HOLDS_UTF8 (&inner_iter));
   val = bson_iter_utf8 (&inner_iter, NULL);
   ASSERT (val);
   ASSERT (strstr (val, driver_version));

   /* Check os type not empty */
   ASSERT (bson_iter_find (&md_iter, "os"));
   ASSERT (BSON_ITER_HOLDS_DOCUMENT (&md_iter));
   ASSERT (bson_iter_recurse (&md_iter, &inner_iter));
   ASSERT (bson_iter_find (&inner_iter, "type"));
   ASSERT (BSON_ITER_HOLDS_UTF8 (&inner_iter));
   val = bson_iter_utf8 (&inner_iter, NULL);
   ASSERT (val);
   ASSERT (strlen (val) > 0);

   /* Not checking os_name, as the spec says it can be NULL. */

   /* Check platform field ok */
   ASSERT (bson_iter_find (&md_iter, "platform"));
   ASSERT (BSON_ITER_HOLDS_UTF8 (&md_iter));
   val = bson_iter_utf8 (&md_iter, NULL);
   ASSERT (val);
   ASSERT (strstr (val, platform) != NULL);

   mock_server_replies_simple (request, "{'ok': 1, 'ismaster': true}");
   request_destroy (request);

   /* Cleanup */
   mongoc_client_pool_push (pool, client);
   mongoc_client_pool_destroy (pool);
   mongoc_uri_destroy (uri);
   mock_server_destroy (server);

   /* reset so later tests don't inherit this metadata */
   _reset_metadata ();
}
/* Test the case where we can't prevent the metadata doc being too big
 * and so we just don't send it.  Also verifies the metadata stays
 * omitted on subsequent heartbeats, including after a server hang-up. */
static void
test_mongoc_metadata_cannot_send (void)
{
   mock_server_t *server;
   mongoc_uri_t *uri;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool;
   request_t *request;
   const char *const server_reply = "{'ok': 1, 'ismaster': true}";
   const bson_t *request_doc;
   char big_string[METADATA_MAX_SIZE];
   mongoc_metadata_t *md;

   _reset_metadata ();

   /* Mess with our global metadata struct so the metadata doc will be
    * way too big */
   memset (big_string, 'a', METADATA_MAX_SIZE - 1);
   big_string[METADATA_MAX_SIZE - 1] = '\0';

   md = _mongoc_metadata_get ();
   bson_free (md->os_name);
   md->os_name = bson_strdup (big_string);

   server = mock_server_new ();
   mock_server_run (server);
   uri = mongoc_uri_copy (mock_server_get_uri (server));
   /* short heartbeat so follow-up isMasters arrive quickly */
   mongoc_uri_set_option_as_int32 (uri, "heartbeatFrequencyMS", 500);
   pool = mongoc_client_pool_new (uri);

   /* Pop a client to trigger the topology scanner */
   client = mongoc_client_pool_pop (pool);
   request = mock_server_receives_ismaster (server);

   /* Make sure the isMaster request DOESN'T have a metadata field: */
   ASSERT (request);
   request_doc = request_get_doc (request, 0);
   ASSERT (request_doc);
   ASSERT (bson_has_field (request_doc, "isMaster"));
   ASSERT (!bson_has_field (request_doc, METADATA_FIELD));

   mock_server_replies_simple (request, server_reply);
   request_destroy (request);

   /* Cause failure on client side */
   request = mock_server_receives_ismaster (server);
   ASSERT (request);
   mock_server_hangs_up (request);
   request_destroy (request);

   /* Make sure the isMaster request still DOESN'T have a metadata field
    * on subsequent heartbeats. */
   request = mock_server_receives_ismaster (server);
   ASSERT (request);
   request_doc = request_get_doc (request, 0);
   ASSERT (request_doc);
   ASSERT (bson_has_field (request_doc, "isMaster"));
   ASSERT (!bson_has_field (request_doc, METADATA_FIELD));

   mock_server_replies_simple (request, server_reply);
   request_destroy (request);

   /* cleanup */
   mongoc_client_pool_push (pool, client);
   mongoc_client_pool_destroy (pool);
   mongoc_uri_destroy (uri);
   mock_server_destroy (server);

   /* Reset again so the next tests don't have a metadata doc which
    * is too big */
   _reset_metadata ();
}
/*
 * Append to the platform field a huge string.
 * Make sure that it gets truncated: the handshake metadata must still be
 * sent, and its encoded size must be exactly METADATA_MAX_SIZE.
 */
static void
test_mongoc_metadata_too_big (void)
{
   mongoc_client_t *client;
   mock_server_t *server;
   mongoc_uri_t *uri;
   future_t *future;
   request_t *request;
   const bson_t *ismaster_doc;
   bson_iter_t iter;

   enum { BUFFER_SIZE = METADATA_MAX_SIZE };
   char big_string[BUFFER_SIZE];
   uint32_t len;
   const uint8_t *dummy;

   server = mock_server_new ();
   mock_server_run (server);

   _reset_metadata ();

   /* platform string as large as the entire metadata budget */
   memset (big_string, 'a', BUFFER_SIZE - 1);
   big_string[BUFFER_SIZE - 1] = '\0';
   ASSERT (mongoc_metadata_append (NULL, NULL, big_string));

   uri = mongoc_uri_copy (mock_server_get_uri (server));
   client = mongoc_client_new_from_uri (uri);
   ASSERT (mongoc_client_set_appname (client, "my app"));

   /* Send a ping, mock server deals with it */
   future = future_client_command_simple (client,
                                          "admin",
                                          tmp_bson ("{'ping': 1}"),
                                          NULL,
                                          NULL,
                                          NULL);
   request = mock_server_receives_ismaster (server);

   /* Make sure the isMaster request has a metadata field, and it's not
    * huge */
   ASSERT (request);
   ismaster_doc = request_get_doc (request, 0);
   ASSERT (ismaster_doc);
   ASSERT (bson_has_field (ismaster_doc, "isMaster"));
   ASSERT (bson_has_field (ismaster_doc, METADATA_FIELD));

   /* isMaster with metadata isn't too big */
   bson_iter_init_find (&iter, ismaster_doc, METADATA_FIELD);
   ASSERT (BSON_ITER_HOLDS_DOCUMENT (&iter));
   bson_iter_document (&iter, &len, &dummy);

   /* Should have truncated the platform field so it fits exactly */
   ASSERT (len == METADATA_MAX_SIZE);

   mock_server_replies_simple (request, "{'ok': 1}");
   request_destroy (request);

   /* complete the ping round-trip started above */
   request = mock_server_receives_command (
      server, "admin", MONGOC_QUERY_SLAVE_OK, "{'ping': 1}");
   mock_server_replies_simple (request, "{'ok': 1}");
   ASSERT (future_get_bool (future));
   future_destroy (future);
   request_destroy (request);

   mongoc_client_destroy (client);
   mongoc_uri_destroy (uri);
   mock_server_destroy (server);

   /* So later tests don't have "aaaaa..." as the md platform string */
   _reset_metadata ();
}