/* Verify that options set on a mongoc_find_and_modify_opts_t (maxTimeMS and
 * an arbitrary appended field) are forwarded verbatim in the findAndModify
 * command sent to the server.  Uses a mock server to capture the command. */
static void
test_find_and_modify_opts (void)
{
   mock_server_t *server;
   mongoc_client_t *client;
   mongoc_collection_t *collection;
   bson_error_t error;
   mongoc_find_and_modify_opts_t *opts;
   future_t *future;
   request_t *request;

   server = mock_server_with_autoismaster (0);
   mock_server_run (server);
   client = mongoc_client_new_from_uri (mock_server_get_uri (server));
   collection = mongoc_client_get_collection (client, "db", "collection");

   opts = mongoc_find_and_modify_opts_new ();
   assert (mongoc_find_and_modify_opts_set_max_time_ms (opts, 42));
   assert (mongoc_find_and_modify_opts_append (opts, tmp_bson ("{'foo': 1}")));

   /* run the command on a background thread so we can serve its request */
   future = future_collection_find_and_modify_with_opts (
      collection, tmp_bson ("{}"), opts, NULL, &error);

   /* both the maxTimeMS option and the appended extra field must appear */
   request = mock_server_receives_command (
      server,
      "db",
      MONGOC_QUERY_NONE,
      "{'findAndModify': 'collection', 'maxTimeMS': 42, 'foo': 1}");

   mock_server_replies_ok_and_destroys (request);
   ASSERT_OR_PRINT (future_get_bool (future), error);

   future_destroy (future);
   mongoc_find_and_modify_opts_destroy (opts);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
   mock_server_destroy (server);
}
static void test_invalid_write_concern (void) { mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT; mongoc_write_command_t command; mongoc_write_result_t result; mongoc_collection_t *collection; mongoc_client_t *client; mongoc_write_concern_t *write_concern; mongoc_server_stream_t *server_stream; bson_t *doc; bson_t reply = BSON_INITIALIZER; bson_error_t error; bool r; client = test_framework_client_new (); assert(client); collection = get_test_collection(client, "test_invalid_write_concern"); assert(collection); write_concern = mongoc_write_concern_new(); assert(write_concern); mongoc_write_concern_set_w(write_concern, 0); mongoc_write_concern_set_journal(write_concern, true); assert(!mongoc_write_concern_is_valid (write_concern)); doc = BCON_NEW("_id", BCON_INT32(0)); _mongoc_write_command_init_insert(&command, doc, write_flags, ++client->cluster.operation_id, true); _mongoc_write_result_init (&result); server_stream = mongoc_cluster_stream_for_writes (&client->cluster, &error); ASSERT_OR_PRINT (server_stream, error); _mongoc_write_command_execute (&command, client, server_stream, collection->db, collection->collection, write_concern, 0, &result); r = _mongoc_write_result_complete (&result, 2, collection->write_concern, &reply, &error); assert(!r); ASSERT_CMPINT(error.domain, ==, MONGOC_ERROR_COMMAND); ASSERT_CMPINT(error.code, ==, MONGOC_ERROR_COMMAND_INVALID_ARG); _mongoc_write_command_destroy (&command); _mongoc_write_result_destroy (&result); bson_destroy(doc); mongoc_server_stream_cleanup (server_stream); mongoc_collection_destroy(collection); mongoc_client_destroy(client); mongoc_write_concern_destroy(write_concern); }
/* Shared implementation for topology SDAM-event tests.  Runs "ping" with
 * either a pooled or single-threaded client while recording topology events
 * into a context, then asserts the first recorded event is
 * topology_opening_event, the last is topology_closed_event, and there are
 * exactly context.n_events events. */
static void
_test_topology_events (bool pooled)
{
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   context_t context;
   bool r;
   bson_error_t error;
   bson_iter_t events_iter;
   bson_iter_t event_iter;
   uint32_t i;

   context_init (&context);

   if (pooled) {
      pool = test_framework_client_pool_new ();
      pool_set_topology_event_callbacks (pool, &context);
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_client_new ();
      client_set_topology_event_callbacks (client, &context);
   }

   r = mongoc_client_command_simple (
      client, "admin", tmp_bson ("{'ping': 1}"), NULL, NULL, &error);
   ASSERT_OR_PRINT (r, error);

   /* destroying the client (or pool) closes the topology, which should
    * emit the final topology_closed_event */
   if (pooled) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }

   /* first event is topology opening */
   bson_iter_init (&events_iter, &context.events);
   bson_iter_next (&events_iter);
   ASSERT (bson_iter_recurse (&events_iter, &event_iter));
   ASSERT (bson_iter_find (&event_iter, "topology_opening_event"));

   /* last event is topology closed */
   for (i = 1; i < context.n_events; i++) {
      ASSERT (bson_iter_next (&events_iter));
   }

   ASSERT (bson_iter_recurse (&events_iter, &event_iter));
   ASSERT (bson_iter_find (&event_iter, "topology_closed_event"));

   /* no more events */
   ASSERT (!bson_iter_next (&events_iter));

   context_destroy (&context);
}
/* Verify that clearing APM callbacks (mongoc_client_set_apm_callbacks with
 * NULL) before destroying the client suppresses the topology_closed_event:
 * the recorded event stream starts with topology_opening_event but must not
 * end with topology_closed_event. */
static void
test_topology_events_disabled (void)
{
   mongoc_client_t *client;
   context_t context;
   bool r;
   bson_error_t error;
   bson_iter_t events_iter;
   bson_iter_t event_iter;
   uint32_t i;

   context_init (&context);

   client = test_framework_client_new ();
   client_set_topology_event_callbacks (client, &context);

   r = mongoc_client_command_simple (
      client, "admin", tmp_bson ("{'ping': 1}"), NULL, NULL, &error);
   ASSERT_OR_PRINT (r, error);

   /* disable callbacks before destroying so we don't see a topology closed
    * event */
   mongoc_client_set_apm_callbacks (client, NULL, NULL);
   mongoc_client_destroy (client);

   /* first event is topology opening */
   bson_iter_init (&events_iter, &context.events);
   bson_iter_next (&events_iter);
   ASSERT (bson_iter_recurse (&events_iter, &event_iter));
   ASSERT (bson_iter_find (&event_iter, "topology_opening_event"));

   /* move forward to the last event */
   for (i = 1; i < context.n_events; i++) {
      ASSERT (bson_iter_next (&events_iter));
   }

   /* verify we didn't receive a topology closed event */
   ASSERT (bson_iter_recurse (&events_iter, &event_iter));
   ASSERT (!bson_iter_find (&event_iter, "topology_closed_event"));

   /* no more events */
   ASSERT (!bson_iter_next (&events_iter));

   context_destroy (&context);
}
/* Create a bulk operation for the context's collection, let the test queue
 * its particular write on it, and start executing on a background thread.
 * Ownership of the bulk operation is handed to the context (freed by
 * bulk_operation_cleanup).  Returns the future for the execute call. */
static future_t *
bulk_exec (func_ctx_t *ctx, bson_t *cmd)
{
   bson_error_t error;
   bool queued;
   mongoc_bulk_operation_t *op;

   op = mongoc_collection_create_bulk_operation_with_opts (ctx->collection,
                                                           ctx->opts);

   /* the context takes ownership and destroys the bulk op during cleanup */
   ctx->data = op;
   ctx->destructor = bulk_operation_cleanup;

   /* the test decides which write (insert, update, ...) to queue */
   queued = ctx->test->bulk_op (op, &error, cmd);
   ASSERT_OR_PRINT (queued, error);

   return future_bulk_operation_execute (op, NULL /* reply */, &ctx->error);
}
/* Verify that a find with the 'exhaust' option falls back to the legacy
 * OP_QUERY wire protocol even when the server supports the find command
 * (wire version WIRE_VERSION_FIND_CMD). */
static void
test_exhaust (void)
{
   mock_server_t *server;
   mongoc_client_t *client;
   mongoc_collection_t *collection;
   mongoc_cursor_t *cursor;
   request_t *request;
   future_t *future;
   const bson_t *doc;
   bson_error_t error;

   server = mock_server_with_autoismaster (WIRE_VERSION_FIND_CMD);
   mock_server_run (server);
   client = mongoc_client_new_from_uri (mock_server_get_uri (server));
   collection = mongoc_client_get_collection (client, "db", "collection");
   cursor = mongoc_collection_find_with_opts (
      collection, tmp_bson (NULL), NULL, tmp_bson ("{'exhaust': true}"));

   future = future_cursor_next (cursor, &doc);

   /* Find, getMore and killCursors commands spec: "The find command does not
    * support the exhaust flag from OP_QUERY. Drivers that support exhaust MUST
    * fallback to existing OP_QUERY wire protocol messages." */
   request = mock_server_receives_request (server);
   mock_server_replies_to_find (request,
                                MONGOC_QUERY_SLAVE_OK | MONGOC_QUERY_EXHAUST,
                                0,
                                0,
                                "db.collection",
                                "{}",
                                false /* is_command */);

   ASSERT (future_get_bool (future));
   ASSERT_OR_PRINT (!mongoc_cursor_error (cursor, &error), error);

   request_destroy (request);
   future_destroy (future);
   mongoc_cursor_destroy (cursor);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
   mock_server_destroy (server);
}
/* Verify that mongoc_client_session_with_transaction gives up once its
 * timeout is exceeded.  Sets a tiny timeout (10 ms) directly on the session
 * and forces retryable error labels via test-only session fields, covering
 * three cases: transient error from the callback, UnknownTransactionCommitResult
 * from commit, and TransientTransactionError from commit. */
static void
test_with_transaction_timeout (void *ctx)
{
   mongoc_client_t *client;
   mongoc_client_session_t *session;
   bson_error_t error;
   bool res;

   client = test_framework_client_new ();

   session = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (session, error);

   /* test hook: shrink withTransaction's retry window to 10 ms */
   session->with_txn_timeout_ms = 10;

   /* Test Case 1: Test that if the callback returns an
      error with the TransientTransactionError label and
      we have exceeded the timeout, withTransaction fails. */
   res = mongoc_client_session_with_transaction (
      session, with_transaction_fail_transient_txn, NULL, NULL, &error);
   ASSERT (!res);

   /* Test Case 2: If committing returns an error with the
      UnknownTransactionCommitResult label and we have exceeded the
      timeout, withTransaction fails. */
   session->fail_commit_label = UNKNOWN_COMMIT_RESULT;
   res = mongoc_client_session_with_transaction (
      session, with_transaction_do_nothing, NULL, NULL, &error);
   ASSERT (!res);

   /* Test Case 3: If committing returns an error with the
      TransientTransactionError label and we have exceeded the timeout,
      withTransaction fails. */
   session->fail_commit_label = TRANSIENT_TXN_ERR;
   res = mongoc_client_session_with_transaction (
      session, with_transaction_do_nothing, NULL, NULL, &error);
   ASSERT (!res);

   mongoc_client_session_destroy (session);
   mongoc_client_destroy (client);
}
/* Shared driver for find tests: runs mongoc_collection_find_with_opts
 * against a mock server at the given max wire version, validates the
 * outgoing request with the caller-supplied check_request_fn, and replies
 * with reply_json.  Asserts the cursor yields a document without error. */
static void
_test_collection_op_query_or_find_command (
   test_collection_find_with_opts_t *test_data,
   check_request_fn_t check_request_fn,
   const char *reply_json,
   int32_t max_wire_version)
{
   mock_server_t *server;
   mongoc_client_t *client;
   mongoc_collection_t *collection;
   mongoc_cursor_t *cursor;
   bson_error_t error;
   future_t *future;
   request_t *request;
   const bson_t *doc;

   server = mock_server_with_autoismaster (max_wire_version);
   mock_server_run (server);
   client = mongoc_client_new_from_uri (mock_server_get_uri (server));
   collection = mongoc_client_get_collection (client, "db", "collection");
   cursor = mongoc_collection_find_with_opts (collection,
                                              test_data->filter_bson,
                                              test_data->read_prefs,
                                              test_data->opts_bson);

   /* creating the cursor must not itself produce an error */
   ASSERT_OR_PRINT (!mongoc_cursor_error (cursor, &error), error);
   future = future_cursor_next (cursor, &doc);
   request = check_request_fn (server, test_data);
   ASSERT (request);
   mock_server_replies_simple (request, reply_json);
   ASSERT (future_get_bool (future));

   request_destroy (request);
   future_destroy (future);
   mongoc_cursor_destroy (cursor);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
   mock_server_destroy (server);
}
/* Insert 200 documents, each with a freshly generated ObjectId "_id", into
 * collection using a w=3 write concern.  The GLE form of the write concern
 * is printed to stderr for debugging. */
static void
insert_test_docs (mongoc_collection_t *collection)
{
   mongoc_write_concern_t *wc;
   bson_error_t error;
   int n;

   wc = mongoc_write_concern_new ();
   mongoc_write_concern_set_w (wc, 3);

   /* log the getLastError document implied by this write concern */
   {
      const bson_t *gle = _mongoc_write_concern_get_gle (wc);
      char *json = bson_as_json (gle, NULL);

      fprintf (stderr, "Write Concern: %s\n", json);
      bson_free (json);
   }

   for (n = 0; n < 200; n++) {
      bson_oid_t oid;
      bson_t doc;

      bson_init (&doc);
      bson_oid_init (&oid, NULL);
      bson_append_oid (&doc, "_id", 3, &oid);

      ASSERT_OR_PRINT (mongoc_collection_insert (
                          collection, MONGOC_INSERT_NONE, &doc, wc, &error),
                       error);

      bson_destroy (&doc);
   }

   mongoc_write_concern_destroy (wc);
}
/* Verify that mongoc_find_and_modify_opts_set_bypass_document_validation
 * puts 'bypassDocumentValidation': true|false into the findAndModify
 * command, matching the bool argument.  Uses a mock server to capture the
 * outgoing command. */
static void
test_find_and_modify_bypass (bool bypass)
{
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   mock_server_t *server;
   request_t *request;
   future_t *future;
   bson_error_t error;
   bson_t *update;
   bson_t doc = BSON_INITIALIZER;
   bson_t reply;
   mongoc_find_and_modify_opts_t *opts;

   server = mock_server_new ();
   mock_server_run (server);

   client = mongoc_client_new_from_uri (mock_server_get_uri (server));
   ASSERT (client);

   collection =
      mongoc_client_get_collection (client, "test", "test_find_and_modify");

   /* advertise a wire version that supports findAndModify write concern */
   auto_ismaster (server,
                  WIRE_VERSION_FAM_WRITE_CONCERN, /* max_wire_version */
                  48000000,                       /* max_message_size */
                  16777216,                       /* max_bson_size */
                  1000);                          /* max_write_batch_size */

   BSON_APPEND_INT32 (&doc, "superduper", 77889);

   update = BCON_NEW ("$set", "{", "superduper", BCON_INT32 (1234), "}");

   opts = mongoc_find_and_modify_opts_new ();
   mongoc_find_and_modify_opts_set_bypass_document_validation (opts, bypass);
   mongoc_find_and_modify_opts_set_update (opts, update);
   mongoc_find_and_modify_opts_set_flags (opts,
                                          MONGOC_FIND_AND_MODIFY_RETURN_NEW);

   future = future_collection_find_and_modify_with_opts (
      collection, &doc, opts, &reply, &error);

   /* the outgoing command must carry the exact bypass flag we set */
   if (bypass) {
      request = mock_server_receives_command (
         server,
         "test",
         MONGOC_QUERY_NONE,
         "{ 'findAndModify' : 'test_find_and_modify', "
         "'query' : { 'superduper' : 77889 },"
         "'update' : { '$set' : { 'superduper' : 1234 } },"
         "'new' : true,"
         "'bypassDocumentValidation' : true }");
   } else {
      request = mock_server_receives_command (
         server,
         "test",
         MONGOC_QUERY_NONE,
         "{ 'findAndModify' : 'test_find_and_modify', "
         "'query' : { 'superduper' : 77889 },"
         "'update' : { '$set' : { 'superduper' : 1234 } },"
         "'new' : true,"
         "'bypassDocumentValidation' : false }");
   }

   mock_server_replies_simple (request, "{ 'value' : null, 'ok' : 1 }");
   ASSERT_OR_PRINT (future_get_bool (future), error);

   future_destroy (future);
   mongoc_find_and_modify_opts_destroy (opts);
   bson_destroy (&reply);
   bson_destroy (update);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
   mock_server_destroy (server);
   bson_destroy (&doc);
}
/* Verify collation handling in findAndModify: with a wire version that
 * supports collation the option is sent to the server; with an older wire
 * version the call fails client-side with a "does not support collation"
 * error. */
static void
test_find_and_modify_collation (int wire)
{
   mock_server_t *server;
   mongoc_client_t *client;
   mongoc_collection_t *collection;
   bson_error_t error;
   mongoc_find_and_modify_opts_t *opts;
   future_t *future;
   request_t *request;
   bson_t *collation;

   server = mock_server_with_autoismaster (wire);
   mock_server_run (server);
   client = mongoc_client_new_from_uri (mock_server_get_uri (server));
   collection = mongoc_client_get_collection (client, "db", "collection");

   collation = BCON_NEW ("collation",
                         "{",
                         "locale",
                         BCON_UTF8 ("en_US"),
                         "caseFirst",
                         BCON_UTF8 ("lower"),
                         "}");

   opts = mongoc_find_and_modify_opts_new ();
   mongoc_find_and_modify_opts_append (opts, collation);

   if (wire >= WIRE_VERSION_COLLATION) {
      /* supported: collation must be forwarded in the command */
      future = future_collection_find_and_modify_with_opts (
         collection, tmp_bson ("{}"), opts, NULL, &error);

      request = mock_server_receives_command (
         server,
         "db",
         MONGOC_QUERY_NONE,
         "{'findAndModify': 'collection',"
         " 'collation': { 'locale': 'en_US', 'caseFirst': 'lower'}"
         "}");

      mock_server_replies_ok_and_destroys (request);
      ASSERT_OR_PRINT (future_get_bool (future), error);
      future_destroy (future);
   } else {
      /* unsupported: the driver must refuse before contacting the server */
      bool ok = mongoc_collection_find_and_modify_with_opts (
         collection, tmp_bson ("{}"), opts, NULL, &error);
      ASSERT_ERROR_CONTAINS (error,
                             MONGOC_ERROR_COMMAND,
                             MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
                             "The selected server does not support collation");
      ASSERT (!ok);
   }

   bson_destroy (collation);
   mongoc_find_and_modify_opts_destroy (opts);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
   mock_server_destroy (server);
}
/* End-to-end findAndModify test against a live server: insert a document,
 * update it via find-and-modify with RETURN_NEW, and verify the reply's
 * "value" reflects the update and "lastErrorObject.updatedExisting" is
 * true. */
static void
test_find_and_modify (void)
{
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   bson_error_t error;
   bson_iter_t iter;
   bson_iter_t citer;
   bson_t *update;
   bson_t doc = BSON_INITIALIZER;
   bson_t reply;
   mongoc_find_and_modify_opts_t *opts;

   client = test_framework_client_new ();
   ASSERT (client);

   collection = get_test_collection (client, "test_find_and_modify");
   ASSERT (collection);

   BSON_APPEND_INT32 (&doc, "superduper", 77889);

   ASSERT_OR_PRINT (mongoc_collection_insert (
                       collection, MONGOC_INSERT_NONE, &doc, NULL, &error),
                    error);

   update = BCON_NEW ("$set", "{", "superduper", BCON_INT32 (1234), "}");

   opts = mongoc_find_and_modify_opts_new ();
   mongoc_find_and_modify_opts_set_update (opts, update);
   mongoc_find_and_modify_opts_set_fields (opts, tmp_bson ("{'superduper': 1}"));
   mongoc_find_and_modify_opts_set_sort (opts, tmp_bson ("{'superduper': 1}"));
   mongoc_find_and_modify_opts_set_flags (opts,
                                          MONGOC_FIND_AND_MODIFY_RETURN_NEW);

   ASSERT_OR_PRINT (mongoc_collection_find_and_modify_with_opts (
                       collection, &doc, opts, &reply, &error),
                    error);

   /* RETURN_NEW: "value" holds the post-update document */
   assert (bson_iter_init_find (&iter, &reply, "value"));
   assert (BSON_ITER_HOLDS_DOCUMENT (&iter));
   assert (bson_iter_recurse (&iter, &citer));
   assert (bson_iter_find (&citer, "superduper"));
   assert (BSON_ITER_HOLDS_INT32 (&citer));
   assert (bson_iter_int32 (&citer) == 1234);

   /* the update matched an existing document */
   assert (bson_iter_init_find (&iter, &reply, "lastErrorObject"));
   assert (BSON_ITER_HOLDS_DOCUMENT (&iter));
   assert (bson_iter_recurse (&iter, &citer));
   assert (bson_iter_find (&citer, "updatedExisting"));
   assert (BSON_ITER_HOLDS_BOOL (&citer));
   assert (bson_iter_bool (&citer));

   bson_destroy (&reply);
   bson_destroy (update);

   ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error);

   mongoc_find_and_modify_opts_destroy (opts);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
   bson_destroy (&doc);
}
/* Verify that an option (read prefs, read concern, etc.) set on the client,
 * database, or collection is inherited by a driver function, and that an
 * option passed explicitly to the function overrides the inherited one.
 * Iterates a matrix of option sources against a mock replica set and checks
 * the command the server actually receives, plus primary/secondary routing
 * for read preferences. */
static void
test_func_inherits_opts (void *ctx)
{
   opt_inheritance_test_t *test = (opt_inheritance_test_t *) ctx;

   /* for example, test mongoc_collection_find_with_opts with no read pref,
    * with a read pref set on the collection (OPT_SOURCE_COLL), with an explicit
    * read pref (OPT_SOURCE_FUNC), or with one read pref on the collection and
    * a different one passed explicitly */
   opt_source_t source_matrix[] = {OPT_SOURCE_NONE,
                                   test->opt_source,
                                   OPT_SOURCE_FUNC,
                                   test->opt_source | OPT_SOURCE_FUNC};

   size_t i;
   func_ctx_t func_ctx;
   mock_rs_t *rs;
   mongoc_client_t *client;
   mongoc_database_t *db;
   mongoc_collection_t *collection;
   bson_t opts = BSON_INITIALIZER;
   mongoc_read_prefs_t *func_prefs = NULL;
   future_t *future;
   request_t *request;
   bson_t cmd = BSON_INITIALIZER;
   bool expect_secondary;
   bson_error_t error;

   /* one primary, one secondary */
   rs = mock_rs_with_autoismaster (WIRE_VERSION_OP_MSG, true, 1, 0);
   /* we use read pref tags like "collection": "yes" to verify where the
    * pref was inherited from; ensure all secondaries match all tags */
   mock_rs_tag_secondary (rs,
                          0,
                          tmp_bson ("{'client': 'yes',"
                                    " 'database': 'yes',"
                                    " 'collection': 'yes',"
                                    " 'function': 'yes'}"));

   mock_rs_run (rs);

   /* iterate over all combinations of options sources: e.g., an option set on
    * collection and not function, on function not collection, both, neither */
   for (i = 0; i < sizeof (source_matrix) / (sizeof (opt_source_t)); i++) {
      expect_secondary = false;
      func_prefs = NULL;
      bson_reinit (&cmd);
      bson_reinit (&opts);

      client = mongoc_client_new_from_uri (mock_rs_get_uri (rs));
      if (source_matrix[i] & OPT_SOURCE_CLIENT) {
         set_client_opt (client, test->opt_type);
      }

      db = mongoc_client_get_database (client, "database");
      if (source_matrix[i] & OPT_SOURCE_DB) {
         set_database_opt (db, test->opt_type);
      }

      collection = mongoc_database_get_collection (db, "collection");
      if (source_matrix[i] & OPT_SOURCE_COLL) {
         set_collection_opt (collection, test->opt_type);
      }

      if (source_matrix[i] & OPT_SOURCE_FUNC) {
         set_func_opt (&opts, &func_prefs, test->opt_type);
      }

      func_ctx_init (
         &func_ctx, test, client, db, collection, func_prefs, &opts);

      /* func_with_opts creates expected "cmd", like {insert: 'collection'} */
      future = test->func_with_opts (&func_ctx, &cmd);

      if (source_matrix[i] != OPT_SOURCE_NONE) {
         add_expected_opt (source_matrix[i], test->opt_type, &cmd);
         if (test->opt_type == OPT_READ_PREFS) {
            expect_secondary = true;
         }
      }

      /* write commands send two OP_MSG sections */
      if (test->n_sections == 2) {
         request = mock_rs_receives_msg (rs, 0, &cmd, tmp_bson ("{}"));
      } else {
         request = mock_rs_receives_msg (rs, 0, &cmd);
      }

      if (expect_secondary) {
         BSON_ASSERT (mock_rs_request_is_to_secondary (rs, request));
      } else {
         BSON_ASSERT (mock_rs_request_is_to_primary (rs, request));
      }

      if (func_ctx.cursor) {
         /* cursor-returning functions need a cursor-shaped reply */
         mock_server_replies_simple (request,
                                     "{'ok': 1,"
                                     " 'cursor': {"
                                     "    'id': 0,"
                                     "    'ns': 'db.collection',"
                                     "    'firstBatch': []}}");

         BSON_ASSERT (!future_get_bool (future));
         future_destroy (future);
         ASSERT_OR_PRINT (!mongoc_cursor_error (func_ctx.cursor, &error),
                          error);
      } else {
         mock_server_replies_simple (request, "{'ok': 1}");
         cleanup_future (future);
      }

      request_destroy (request);
      mongoc_read_prefs_destroy (func_prefs);
      func_ctx_cleanup (&func_ctx);
      mongoc_collection_destroy (collection);
      mongoc_database_destroy (db);
      mongoc_client_destroy (client);
   }

   bson_destroy (&cmd);
   bson_destroy (&opts);
   mock_rs_destroy (rs);
}
/* Exercise exhaust-cursor semantics against a live server:
 *  - a client with an active exhaust cursor is marked in_exhaust, and
 *    destroying the cursor early forces a disconnect;
 *  - reads/writes on other cursors fail with MONGOC_ERROR_CLIENT_IN_EXHAUST
 *    while an exhaust cursor is in progress;
 *  - fully draining the exhaust cursor leaves the connection intact and
 *    lets a previously created cursor proceed. */
static void
test_exhaust_cursor (bool pooled)
{
   mongoc_stream_t *stream;
   mongoc_write_concern_t *wr;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   mongoc_collection_t *collection;
   mongoc_cursor_t *cursor;
   mongoc_cursor_t *cursor2;
   const bson_t *doc;
   bson_t q;
   bson_t b[10];
   bson_t *bptr[10];
   int i;
   bool r;
   uint32_t hint;
   bson_error_t error;
   bson_oid_t oid;
   int64_t timestamp1;

   if (pooled) {
      pool = test_framework_client_pool_new ();
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_client_new ();
   }
   assert (client);

   collection = get_test_collection (client, "test_exhaust_cursor");
   assert (collection);

   mongoc_collection_drop (collection, &error);

   wr = mongoc_write_concern_new ();
   mongoc_write_concern_set_journal (wr, true);

   /* bulk insert some records to work on */
   {
      bson_init (&q);

      for (i = 0; i < 10; i++) {
         bson_init (&b[i]);
         bson_oid_init (&oid, NULL);
         bson_append_oid (&b[i], "_id", -1, &oid);
         bson_append_int32 (&b[i], "n", -1, i % 2);
         bptr[i] = &b[i];
      }

      BEGIN_IGNORE_DEPRECATIONS;
      ASSERT_OR_PRINT (mongoc_collection_insert_bulk (
                          collection,
                          MONGOC_INSERT_NONE,
                          (const bson_t **) bptr,
                          10,
                          wr,
                          &error),
                       error);
      END_IGNORE_DEPRECATIONS;
   }

   /* create a couple of cursors */
   {
      cursor = mongoc_collection_find (
         collection, MONGOC_QUERY_EXHAUST, 0, 0, 0, &q, NULL, NULL);

      cursor2 = mongoc_collection_find (
         collection, MONGOC_QUERY_NONE, 0, 0, 0, &q, NULL, NULL);
   }

   /* Read from the exhaust cursor, ensure that we're in exhaust where we
    * should be and ensure that an early destroy properly causes a disconnect
    * */
   {
      r = mongoc_cursor_next (cursor, &doc);
      if (!r) {
         mongoc_cursor_error (cursor, &error);
         fprintf (stderr, "cursor error: %s\n", error.message);
      }
      assert (r);
      assert (doc);
      assert (cursor->in_exhaust);
      assert (client->in_exhaust);

      /* destroy the cursor, make sure a disconnect happened */
      timestamp1 = get_timestamp (client, cursor);
      mongoc_cursor_destroy (cursor);
      assert (!client->in_exhaust);
   }

   /* Grab a new exhaust cursor, then verify that reading from that cursor
    * (putting the client into exhaust), breaks a mid-stream read from a
    * regular cursor */
   {
      cursor = mongoc_collection_find (
         collection, MONGOC_QUERY_EXHAUST, 0, 0, 0, &q, NULL, NULL);

      r = mongoc_cursor_next (cursor2, &doc);
      if (!r) {
         mongoc_cursor_error (cursor2, &error);
         fprintf (stderr, "cursor error: %s\n", error.message);
      }
      assert (r);
      assert (doc);
      /* a reconnect happened since the earlier disconnect */
      assert (timestamp1 < get_timestamp (client, cursor2));

      for (i = 0; i < 5; i++) {
         r = mongoc_cursor_next (cursor2, &doc);
         if (!r) {
            mongoc_cursor_error (cursor2, &error);
            fprintf (stderr, "cursor error: %s\n", error.message);
         }
         assert (r);
         assert (doc);
      }

      r = mongoc_cursor_next (cursor, &doc);
      assert (r);
      assert (doc);

      doc = NULL;
      r = mongoc_cursor_next (cursor2, &doc);
      assert (!r);
      assert (!doc);

      /* the regular cursor must fail while the client is in exhaust */
      mongoc_cursor_error (cursor2, &error);
      assert (error.domain == MONGOC_ERROR_CLIENT);
      assert (error.code == MONGOC_ERROR_CLIENT_IN_EXHAUST);

      mongoc_cursor_destroy (cursor2);
   }

   /* make sure writes fail as well */
   {
      BEGIN_IGNORE_DEPRECATIONS;
      r = mongoc_collection_insert_bulk (collection,
                                         MONGOC_INSERT_NONE,
                                         (const bson_t **) bptr,
                                         10,
                                         wr,
                                         &error);
      END_IGNORE_DEPRECATIONS;
      assert (!r);
      assert (error.domain == MONGOC_ERROR_CLIENT);
      assert (error.code == MONGOC_ERROR_CLIENT_IN_EXHAUST);
   }

   /* we're still in exhaust.
    *
    * 1. check that we can create a new cursor, as long as we don't read from it
    * 2. fully exhaust the exhaust cursor
    * 3. make sure that we don't disconnect at destroy
    * 4. make sure we can read the cursor we made during the exhuast
    */
   {
      cursor2 = mongoc_collection_find (
         collection, MONGOC_QUERY_NONE, 0, 0, 0, &q, NULL, NULL);

      stream = (mongoc_stream_t *) mongoc_set_get (client->cluster.nodes,
                                                   cursor->hint);
      hint = cursor->hint;

      for (i = 1; i < 10; i++) {
         r = mongoc_cursor_next (cursor, &doc);
         assert (r);
         assert (doc);
      }

      r = mongoc_cursor_next (cursor, &doc);
      assert (!r);
      assert (!doc);

      mongoc_cursor_destroy (cursor);

      /* same stream object: no disconnect happened on destroy */
      assert (stream ==
              (mongoc_stream_t *) mongoc_set_get (client->cluster.nodes,
                                                  hint));

      r = mongoc_cursor_next (cursor2, &doc);
      assert (r);
      assert (doc);
   }

   bson_destroy (&q);
   for (i = 0; i < 10; i++) {
      bson_destroy (&b[i]);
   }

   ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error);

   mongoc_write_concern_destroy (wr);
   mongoc_cursor_destroy (cursor2);
   mongoc_collection_destroy (collection);

   if (pooled) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }
}
static void test_split_insert (void) { mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT; mongoc_write_command_t command; mongoc_write_result_t result; mongoc_collection_t *collection; mongoc_client_t *client; bson_oid_t oid; bson_t **docs; bson_t reply = BSON_INITIALIZER; bson_error_t error; mongoc_server_stream_t *server_stream; int i; bool r; client = test_framework_client_new (); assert (client); collection = get_test_collection (client, "test_split_insert"); assert (collection); docs = (bson_t **)bson_malloc (sizeof(bson_t*) * 3000); for (i = 0; i < 3000; i++) { docs [i] = bson_new (); bson_oid_init (&oid, NULL); BSON_APPEND_OID (docs [i], "_id", &oid); } _mongoc_write_result_init (&result); _mongoc_write_command_init_insert (&command, docs[0], write_flags, ++client->cluster.operation_id, true); for (i = 1; i < 3000; i++) { _mongoc_write_command_insert_append (&command, docs[i]); } server_stream = mongoc_cluster_stream_for_writes (&client->cluster, &error); ASSERT_OR_PRINT (server_stream, error); _mongoc_write_command_execute (&command, client, server_stream, collection->db, collection->collection, NULL, 0, &result); r = _mongoc_write_result_complete (&result, 2, collection->write_concern, &reply, &error); ASSERT_OR_PRINT (r, error); assert (result.nInserted == 3000); _mongoc_write_command_destroy (&command); _mongoc_write_result_destroy (&result); ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error); for (i = 0; i < 3000; i++) { bson_destroy (docs [i]); } bson_free (docs); mongoc_server_stream_cleanup (server_stream); mongoc_collection_destroy (collection); mongoc_client_destroy (client); }
/* Replica-set failover test: insert docs, query a SECONDARY with a 100-doc
 * batch, drain the first OP_REPLY, kill the node mid-cursor, and verify the
 * next fetch fails with a cursor error.  The killed node is restarted at the
 * end. */
static void
test1 (void)
{
   mongoc_server_description_t *description;
   mongoc_collection_t *collection;
   mongoc_read_prefs_t *read_prefs;
   mongoc_cursor_t *cursor;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   const bson_t *doc;
   bson_error_t error;
   bool r;
   ha_node_t *replica;
   bson_t q;
   int i;

   bson_init (&q);

   if (use_pool) {
      pool = ha_replica_set_create_client_pool (replica_set);
      client = mongoc_client_pool_pop (pool);
   } else {
      client = ha_replica_set_create_client (replica_set);
   }

   collection = mongoc_client_get_collection (client, "test1", "test1");

   MONGOC_DEBUG ("Inserting test documents.");
   insert_test_docs (collection);
   MONGOC_INFO ("Test documents inserted.");

   read_prefs = mongoc_read_prefs_new (MONGOC_READ_SECONDARY);

   MONGOC_DEBUG ("Sending query to a SECONDARY.");
   cursor = mongoc_collection_find (collection,
                                    MONGOC_QUERY_NONE,
                                    0,
                                    0,
                                    100,
                                    &q,
                                    NULL,
                                    read_prefs);

   BSON_ASSERT (cursor);
   /* no server chosen until the first read */
   BSON_ASSERT (!cursor->server_id);

   /*
    * Send OP_QUERY to server and get first document back.
    */
   MONGOC_INFO ("Sending OP_QUERY.");
   r = mongoc_cursor_next (cursor, &doc);
   BSON_ASSERT (r);
   BSON_ASSERT (cursor->server_id);
   BSON_ASSERT (cursor->sent);
   BSON_ASSERT (!cursor->done);
   BSON_ASSERT (cursor->rpc.reply.n_returned == 100);
   BSON_ASSERT (!cursor->end_of_event);

   /*
    * Make sure we queried a secondary.
    */
   description = mongoc_topology_server_by_id (client->topology,
                                               cursor->server_id,
                                               &error);
   ASSERT_OR_PRINT (description, error);
   BSON_ASSERT (description->type != MONGOC_SERVER_RS_PRIMARY);
   mongoc_server_description_destroy (description);

   /*
    * Exhaust the items in our first OP_REPLY.
    */
   MONGOC_DEBUG ("Exhausting OP_REPLY.");
   for (i = 0; i < 98; i++) {
      r = mongoc_cursor_next (cursor, &doc);
      BSON_ASSERT (r);
      BSON_ASSERT (cursor->server_id);
      BSON_ASSERT (!cursor->done);
      BSON_ASSERT (!cursor->end_of_event);
   }

   /*
    * Finish off the last item in this OP_REPLY.
    */
   MONGOC_INFO ("Fetcing last doc from OP_REPLY.");
   r = mongoc_cursor_next (cursor, &doc);
   BSON_ASSERT (r);
   BSON_ASSERT (cursor->server_id);
   BSON_ASSERT (cursor->sent);
   BSON_ASSERT (!cursor->done);
   BSON_ASSERT (!cursor->end_of_event);

   /*
    * Determine which node we queried by using the server_id to
    * get the cluster information.
    */
   BSON_ASSERT (cursor->server_id);
   replica = get_replica (client, cursor->server_id);

   /*
    * Kill the node we are communicating with.
    */
   MONGOC_INFO ("Killing replicaSet node to synthesize failure.");
   ha_node_kill (replica);

   /*
    * Try to fetch the next result set, expect failure.
    */
   MONGOC_DEBUG ("Checking for expected failure.");
   r = mongoc_cursor_next (cursor, &doc);
   BSON_ASSERT (!r);

   r = mongoc_cursor_error (cursor, &error);
   BSON_ASSERT (r);
   MONGOC_WARNING ("%s", error.message);

   mongoc_cursor_destroy (cursor);
   mongoc_read_prefs_destroy (read_prefs);
   mongoc_collection_destroy (collection);

   if (use_pool) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }

   bson_destroy (&q);

   ha_node_restart (replica);
}
/* Replica-set degraded-availability test: with two of three nodes killed, a
 * query with SECONDARY_PREFERRED read preference must still succeed (i.e.
 * report no cursor error) against the one surviving node.  The killed nodes
 * are restarted at the end. */
static void
test2 (void)
{
   mongoc_read_prefs_t *read_prefs;
   mongoc_collection_t *collection;
   mongoc_cursor_t *cursor;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   const bson_t *doc;
   bson_error_t error;
   bool r;
   bson_t q;

   bson_init (&q);

   /*
    * Start by killing 2 of the replica set nodes.
    */
   ha_node_kill (r1);
   ha_node_kill (r2);

   if (use_pool) {
      pool = ha_replica_set_create_client_pool (replica_set);
      client = mongoc_client_pool_pop (pool);
   } else {
      client = ha_replica_set_create_client (replica_set);
   }

   collection = mongoc_client_get_collection (client, "test2", "test2");

   /*
    * Perform a query and ensure it fails with no nodes available.
    */
   read_prefs = mongoc_read_prefs_new (MONGOC_READ_SECONDARY_PREFERRED);
   cursor = mongoc_collection_find (collection,
                                    MONGOC_QUERY_NONE,
                                    0,
                                    100,
                                    0,
                                    &q,
                                    NULL,
                                    read_prefs);

   /*
    * Try to submit OP_QUERY. Since it is SECONDARY PREFERRED, it should
    * succeed if there is any node up (which r3 is up).
    */
   r = mongoc_cursor_next (cursor, &doc);
   BSON_ASSERT (!r); /* No docs */

   /* No error, slaveOk was set */
   ASSERT_OR_PRINT (!mongoc_cursor_error (cursor, &error), error);

   mongoc_read_prefs_destroy (read_prefs);
   mongoc_cursor_destroy (cursor);
   mongoc_collection_destroy (collection);

   if (use_pool) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }

   bson_destroy (&q);

   ha_node_restart (r1);
   ha_node_restart (r2);
}
/* Shared implementation for server-heartbeat APM tests.  Runs a "foo"
 * command against a mock server with heartbeat callbacks registered (pooled
 * or single client), then — depending on whether the mock server answers or
 * hangs up — verifies exactly one heartbeat_started event paired with either
 * a heartbeat_succeeded or heartbeat_failed event. */
static void
_test_heartbeat_events (bool pooled, bool succeeded)
{
   context_t context;
   mock_server_t *server;
   mongoc_uri_t *uri;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   future_t *future;
   request_t *request;
   char *expected_json;
   bson_error_t error;

   context_init (&context);

   /* auto-respond to "foo" command */
   server = mock_server_new ();
   mock_server_run (server);
   mock_server_autoresponds (server, responder, NULL, NULL);
   uri = mongoc_uri_copy (mock_server_get_uri (server));
   /* keep server selection short so the failure case returns quickly */
   mongoc_uri_set_option_as_int32 (uri, "serverSelectionTimeoutMS", 400);

   if (pooled) {
      pool = mongoc_client_pool_new (uri);
      pool_set_heartbeat_event_callbacks (pool, &context);
      client = mongoc_client_pool_pop (pool);
   } else {
      client = mongoc_client_new_from_uri (uri);
      client_set_heartbeat_event_callbacks (client, &context);
   }

   /* trigger "ismaster" handshake */
   future = future_client_command_simple (
      client, "admin", tmp_bson ("{'foo': 1}"), NULL, NULL, &error);

   /* topology scanner calls ismaster once */
   request = mock_server_receives_ismaster (server);

   if (succeeded) {
      mock_server_replies_ok_and_destroys (request);
   } else {
      mock_server_hangs_up (request);
      request_destroy (request);
   }

   /* pooled client opens new socket, handshakes it by calling ismaster again */
   if (pooled && succeeded) {
      request = mock_server_receives_ismaster (server);
      mock_server_replies_ok_and_destroys (request);
   }

   if (succeeded) {
      /* "foo" command succeeds */
      ASSERT_OR_PRINT (future_get_bool (future), error);
   } else {
      ASSERT (!future_get_bool (future));
   }

   if (pooled) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }

   /* even if pooled, only topology scanner sends events, so we get one pair */
   if (succeeded) {
      expected_json = bson_strdup_printf (
         "{'0': {'heartbeat_started_event': {'host': '%s'}},"
         " '1': {'heartbeat_succeeded_event': {'host': '%s'}}}",
         mock_server_get_host_and_port (server),
         mock_server_get_host_and_port (server));
   } else {
      expected_json = bson_strdup_printf (
         "{'0': {'heartbeat_started_event': {'host': '%s'}},"
         " '1': {'heartbeat_failed_event': {'host': '%s'}}}",
         mock_server_get_host_and_port (server),
         mock_server_get_host_and_port (server));
   }

   check_json_apm_events (&context.events, tmp_bson (expected_json));

   future_destroy (future);
   bson_free (expected_json);
   mongoc_uri_destroy (uri);
   mock_server_destroy (server);
   context_destroy (&context);
}
static void test_bypass_validation (void *context) { mongoc_collection_t *collection; bson_t reply = BSON_INITIALIZER; mongoc_bulk_operation_t *bulk; mongoc_database_t *database; mongoc_write_concern_t *wr; mongoc_client_t *client; bson_error_t error; bson_t *options; char *collname; char *dbname; int r; int i; client = test_framework_client_new (); assert (client); dbname = gen_collection_name ("dbtest"); collname = gen_collection_name ("bypass"); database = mongoc_client_get_database (client, dbname); collection = mongoc_database_get_collection (database, collname); assert (collection); options = tmp_bson ("{'validator': {'number': {'$gte': 5}}, 'validationAction': 'error'}"); ASSERT_OR_PRINT (mongoc_database_create_collection (database, collname, options, &error), error); /* {{{ Default fails validation */ bulk = mongoc_collection_create_bulk_operation(collection, true, NULL); for (i = 0; i < 3; i++) { bson_t *doc = tmp_bson (bson_strdup_printf ("{'number': 3, 'high': %d }", i)); mongoc_bulk_operation_insert (bulk, doc); } r = mongoc_bulk_operation_execute (bulk, &reply, &error); ASSERT(!r); ASSERT_ERROR_CONTAINS (error, MONGOC_ERROR_COMMAND, 121, "Document failed validation"); mongoc_bulk_operation_destroy (bulk); /* }}} */ /* {{{ bypass_document_validation=false Fails validation */ bulk = mongoc_collection_create_bulk_operation(collection, true, NULL); mongoc_bulk_operation_set_bypass_document_validation (bulk, false); for (i = 0; i < 3; i++) { bson_t *doc = tmp_bson (bson_strdup_printf ("{'number': 3, 'high': %d }", i)); mongoc_bulk_operation_insert (bulk, doc); } r = mongoc_bulk_operation_execute (bulk, &reply, &error); ASSERT(!r); ASSERT_ERROR_CONTAINS (error, MONGOC_ERROR_COMMAND, 121, "Document failed validation"); mongoc_bulk_operation_destroy (bulk); /* }}} */ /* {{{ bypass_document_validation=true ignores validation */ bulk = mongoc_collection_create_bulk_operation(collection, true, NULL); mongoc_bulk_operation_set_bypass_document_validation (bulk, 
true); for (i = 0; i < 3; i++) { bson_t *doc = tmp_bson (bson_strdup_printf ("{'number': 3, 'high': %d }", i)); mongoc_bulk_operation_insert (bulk, doc); } r = mongoc_bulk_operation_execute (bulk, &reply, &error); ASSERT_OR_PRINT(r, error); mongoc_bulk_operation_destroy (bulk); /* }}} */ /* {{{ w=0 and bypass_document_validation=set fails */ bulk = mongoc_collection_create_bulk_operation(collection, true, NULL); wr = mongoc_write_concern_new (); mongoc_write_concern_set_w (wr, 0); mongoc_bulk_operation_set_write_concern (bulk, wr); mongoc_bulk_operation_set_bypass_document_validation (bulk, true); for (i = 0; i < 3; i++) { bson_t *doc = tmp_bson (bson_strdup_printf ("{'number': 3, 'high': %d }", i)); mongoc_bulk_operation_insert (bulk, doc); } r = mongoc_bulk_operation_execute (bulk, &reply, &error); ASSERT_OR_PRINT(!r, error); ASSERT_ERROR_CONTAINS (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot set bypassDocumentValidation for unacknowledged writes"); mongoc_bulk_operation_destroy (bulk); mongoc_write_concern_destroy (wr); /* }}} */ ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error); mongoc_collection_destroy (collection); mongoc_client_destroy (client); }