static void
test_split_insert (void)
{
   mongoc_write_command_t command;
   mongoc_write_result_t result;
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   bson_oid_t oid;
   bson_t **docs;
   bson_t reply = BSON_INITIALIZER;
   bson_error_t error;
   int i;
   bool r;

   client = test_framework_client_new (NULL);
   assert (client);

   collection = get_test_collection (client, "test_split_insert");
   assert (collection);

   docs = bson_malloc (sizeof (bson_t *) * 3000);

   for (i = 0; i < 3000; i++) {
      docs[i] = bson_new ();
      bson_oid_init (&oid, NULL);
      BSON_APPEND_OID (docs[i], "_id", &oid);
   }

   _mongoc_write_result_init (&result);
   _mongoc_write_command_init_insert (
      &command, (const bson_t *const *) docs, 3000, true, true);

   _mongoc_write_command_execute (&command, client, 0, collection->db,
                                  collection->collection, NULL, 0, &result);

   r = _mongoc_write_result_complete (&result, &reply, &error);
   assert (r);
   assert (result.nInserted == 3000);

   _mongoc_write_command_destroy (&command);
   _mongoc_write_result_destroy (&result);

   r = mongoc_collection_drop (collection, &error);
   assert (r);

   for (i = 0; i < 3000; i++) {
      bson_destroy (docs[i]);
   }

   bson_free (docs);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
}
static void
test_find_and_modify_write_concern_wire_32_failure (void *context)
{
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   bson_error_t error;
   mongoc_find_and_modify_opts_t *opts;
   bson_t reply;
   bson_t query = BSON_INITIALIZER;
   bson_t *update;
   bool success;
   mongoc_write_concern_t *wc;

   client = test_framework_client_new ();
   collection = get_test_collection (client, "writeFailure");

   wc = mongoc_write_concern_new ();
   mongoc_write_concern_set_w (wc, 42);
   mongoc_collection_set_write_concern (collection, wc);

   /* Find Zlatan Ibrahimovic, the striker */
   BSON_APPEND_UTF8 (&query, "firstname", "Zlatan");
   BSON_APPEND_UTF8 (&query, "lastname", "Ibrahimovic");
   BSON_APPEND_UTF8 (&query, "profession", "Football player");
   BSON_APPEND_INT32 (&query, "age", 34);
   BSON_APPEND_INT32 (
      &query, "goals", (16 + 35 + 23 + 57 + 16 + 14 + 28 + 84) + (1 + 6 + 62));

   /* Add his football position */
   update = BCON_NEW ("$set", "{", "position", BCON_UTF8 ("striker"), "}");

   opts = mongoc_find_and_modify_opts_new ();
   mongoc_find_and_modify_opts_set_update (opts, update);

   /* Create the document if it didn't exist, and return the updated document */
   mongoc_find_and_modify_opts_set_flags (
      opts, MONGOC_FIND_AND_MODIFY_UPSERT | MONGOC_FIND_AND_MODIFY_RETURN_NEW);

   success = mongoc_collection_find_and_modify_with_opts (
      collection, &query, opts, &reply, &error);

   ASSERT (!success);
   ASSERT_ERROR_CONTAINS (
      error, MONGOC_ERROR_WRITE_CONCERN, 100, "Write Concern error:");

   bson_destroy (&reply);
   bson_destroy (update);
   bson_destroy (&query);
   mongoc_find_and_modify_opts_destroy (opts);
   mongoc_collection_drop (collection, NULL);
   mongoc_write_concern_destroy (wc);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
}
static void
test_invalid_write_concern (void)
{
   mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT;
   mongoc_write_command_t command;
   mongoc_write_result_t result;
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   mongoc_write_concern_t *write_concern;
   mongoc_server_stream_t *server_stream;
   bson_t *doc;
   bson_t reply = BSON_INITIALIZER;
   bson_error_t error;
   bool r;

   client = test_framework_client_new ();
   assert (client);

   collection = get_test_collection (client, "test_invalid_write_concern");
   assert (collection);

   write_concern = mongoc_write_concern_new ();
   assert (write_concern);
   mongoc_write_concern_set_w (write_concern, 0);
   mongoc_write_concern_set_journal (write_concern, true);
   assert (!mongoc_write_concern_is_valid (write_concern));

   doc = BCON_NEW ("_id", BCON_INT32 (0));

   _mongoc_write_command_init_insert (
      &command, doc, write_flags, ++client->cluster.operation_id, true);
   _mongoc_write_result_init (&result);

   server_stream = mongoc_cluster_stream_for_writes (&client->cluster, &error);
   ASSERT_OR_PRINT (server_stream, error);

   _mongoc_write_command_execute (&command,
                                  client,
                                  server_stream,
                                  collection->db,
                                  collection->collection,
                                  write_concern,
                                  0,
                                  &result);

   r = _mongoc_write_result_complete (
      &result, 2, collection->write_concern, &reply, &error);
   assert (!r);
   ASSERT_CMPINT (error.domain, ==, MONGOC_ERROR_COMMAND);
   ASSERT_CMPINT (error.code, ==, MONGOC_ERROR_COMMAND_INVALID_ARG);

   _mongoc_write_command_destroy (&command);
   _mongoc_write_result_destroy (&result);
   bson_destroy (doc);
   mongoc_server_stream_cleanup (server_stream);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
   mongoc_write_concern_destroy (write_concern);
}
static void
_test_topology_events (bool pooled)
{
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   context_t context;
   bool r;
   bson_error_t error;
   bson_iter_t events_iter;
   bson_iter_t event_iter;
   uint32_t i;

   context_init (&context);

   if (pooled) {
      pool = test_framework_client_pool_new ();
      pool_set_topology_event_callbacks (pool, &context);
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_client_new ();
      client_set_topology_event_callbacks (client, &context);
   }

   r = mongoc_client_command_simple (
      client, "admin", tmp_bson ("{'ping': 1}"), NULL, NULL, &error);
   ASSERT_OR_PRINT (r, error);

   if (pooled) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }

   /* first event is topology opening */
   bson_iter_init (&events_iter, &context.events);
   bson_iter_next (&events_iter);
   ASSERT (bson_iter_recurse (&events_iter, &event_iter));
   ASSERT (bson_iter_find (&event_iter, "topology_opening_event"));

   /* last event is topology closed */
   for (i = 1; i < context.n_events; i++) {
      ASSERT (bson_iter_next (&events_iter));
   }
   ASSERT (bson_iter_recurse (&events_iter, &event_iter));
   ASSERT (bson_iter_find (&event_iter, "topology_closed_event"));

   /* no more events */
   ASSERT (!bson_iter_next (&events_iter));

   context_destroy (&context);
}
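/* The helpers client_set_topology_event_callbacks and
 * pool_set_topology_event_callbacks used above are defined elsewhere in this
 * file. Below is a minimal sketch, not the suite's actual helper, of how such
 * a helper is typically wired up with the public APM API; the function names
 * prefixed "example_" are hypothetical, and only the topology-opening callback
 * is shown. */
static void
example_topology_opening_cb (const mongoc_apm_topology_opening_t *event)
{
   context_t *ctx;

   /* recover the per-test context passed to mongoc_client_set_apm_callbacks */
   ctx = (context_t *) mongoc_apm_topology_opening_get_context (event);
   (void) ctx; /* a real callback would append a "topology_opening_event"
                * entry to ctx->events here */
}

static void
example_set_topology_event_callbacks (mongoc_client_t *client, context_t *ctx)
{
   mongoc_apm_callbacks_t *callbacks;

   callbacks = mongoc_apm_callbacks_new ();
   mongoc_apm_set_topology_opening_cb (callbacks, example_topology_opening_cb);
   /* topology-changed and topology-closed callbacks would be set here too */
   mongoc_client_set_apm_callbacks (client, callbacks, (void *) ctx);
   mongoc_apm_callbacks_destroy (callbacks);
}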
static void
test_invalid_write_concern (void)
{
   mongoc_write_command_t command;
   mongoc_write_result_t result;
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   mongoc_write_concern_t *write_concern;
   bson_t **docs;
   bson_t reply = BSON_INITIALIZER;
   bson_error_t error;
   bool r;

   client = test_framework_client_new (NULL);
   assert (client);

   collection = get_test_collection (client, "test_invalid_write_concern");
   assert (collection);

   write_concern = mongoc_write_concern_new ();
   assert (write_concern);
   mongoc_write_concern_set_w (write_concern, 0);
   mongoc_write_concern_set_journal (write_concern, true);
   assert (!_mongoc_write_concern_is_valid (write_concern));

   docs = bson_malloc (sizeof (bson_t *));
   docs[0] = BCON_NEW ("_id", BCON_INT32 (0));

   _mongoc_write_command_init_insert (
      &command, (const bson_t *const *) docs, 1, true, true);
   _mongoc_write_result_init (&result);

   _mongoc_write_command_execute (&command, client, 0, collection->db,
                                  collection->collection, write_concern, 0,
                                  &result);

   r = _mongoc_write_result_complete (&result, &reply, &error);
   assert (!r);
   assert (error.domain == MONGOC_ERROR_COMMAND);
   assert (error.code == MONGOC_ERROR_COMMAND_INVALID_ARG);

   _mongoc_write_command_destroy (&command);
   _mongoc_write_result_destroy (&result);
   bson_destroy (docs[0]);
   bson_free (docs);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
   mongoc_write_concern_destroy (write_concern);
}
void
call_ismaster (bson_t *reply)
{
   bson_t ismaster = BSON_INITIALIZER;
   mongoc_client_t *client;
   bool r;

   BSON_APPEND_INT32 (&ismaster, "isMaster", 1);

   client = test_framework_client_new (NULL);
   r = mongoc_client_command_simple (
      client, "admin", &ismaster, NULL, reply, NULL);
   assert (r);

   /* the command document and the temporary client are no longer needed;
    * the caller owns the reply */
   bson_destroy (&ismaster);
   mongoc_client_destroy (client);
}
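/* Example use of the reply populated by call_ismaster above: read the
 * server's maxWireVersion out of the isMaster response. A minimal sketch with
 * a hypothetical helper name; error handling is intentionally omitted. */
static int32_t
example_get_max_wire_version (void)
{
   bson_t reply;
   bson_iter_t iter;
   int32_t max_wire_version = 0;

   call_ismaster (&reply);

   if (bson_iter_init_find (&iter, &reply, "maxWireVersion") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      max_wire_version = bson_iter_int32 (&iter);
   }

   bson_destroy (&reply);

   return max_wire_version;
}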
static void
test_topology_events_disabled (void)
{
   mongoc_client_t *client;
   context_t context;
   bool r;
   bson_error_t error;
   bson_iter_t events_iter;
   bson_iter_t event_iter;
   uint32_t i;

   context_init (&context);
   client = test_framework_client_new ();
   client_set_topology_event_callbacks (client, &context);

   r = mongoc_client_command_simple (
      client, "admin", tmp_bson ("{'ping': 1}"), NULL, NULL, &error);
   ASSERT_OR_PRINT (r, error);

   /* disable callbacks before destroying so we don't see a topology closed
    * event */
   mongoc_client_set_apm_callbacks (client, NULL, NULL);
   mongoc_client_destroy (client);

   /* first event is topology opening */
   bson_iter_init (&events_iter, &context.events);
   bson_iter_next (&events_iter);
   ASSERT (bson_iter_recurse (&events_iter, &event_iter));
   ASSERT (bson_iter_find (&event_iter, "topology_opening_event"));

   /* move forward to the last event */
   for (i = 1; i < context.n_events; i++) {
      ASSERT (bson_iter_next (&events_iter));
   }

   /* verify we didn't receive a topology closed event */
   ASSERT (bson_iter_recurse (&events_iter, &event_iter));
   ASSERT (!bson_iter_find (&event_iter, "topology_closed_event"));

   /* no more events */
   ASSERT (!bson_iter_next (&events_iter));

   context_destroy (&context);
}
static void
test_with_transaction_timeout (void *ctx)
{
   mongoc_client_t *client;
   mongoc_client_session_t *session;
   bson_error_t error;
   bool res;

   client = test_framework_client_new ();

   session = mongoc_client_start_session (client, NULL, &error);
   ASSERT_OR_PRINT (session, error);

   session->with_txn_timeout_ms = 10;

   /* Test Case 1: if the callback returns an error with the
    * TransientTransactionError label and we have exceeded the timeout,
    * withTransaction fails. */
   res = mongoc_client_session_with_transaction (
      session, with_transaction_fail_transient_txn, NULL, NULL, NULL, &error);
   ASSERT (!res);

   /* Test Case 2: if committing returns an error with the
    * UnknownTransactionCommitResult label and we have exceeded the timeout,
    * withTransaction fails. */
   session->fail_commit_label = UNKNOWN_COMMIT_RESULT;
   res = mongoc_client_session_with_transaction (
      session, with_transaction_do_nothing, NULL, NULL, NULL, &error);
   ASSERT (!res);

   /* Test Case 3: if committing returns an error with the
    * TransientTransactionError label and we have exceeded the timeout,
    * withTransaction fails. */
   session->fail_commit_label = TRANSIENT_TXN_ERR;
   res = mongoc_client_session_with_transaction (
      session, with_transaction_do_nothing, NULL, NULL, NULL, &error);
   ASSERT (!res);

   mongoc_client_session_destroy (session);
   mongoc_client_destroy (client);
}
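/* The callbacks with_transaction_do_nothing and
 * with_transaction_fail_transient_txn used above are defined elsewhere in
 * this file. For reference, a minimal sketch of the shape of a
 * mongoc_client_session_with_transaction callback: it returns true to let the
 * driver commit and false to abort. The name "example_with_transaction_cb" is
 * hypothetical. */
static bool
example_with_transaction_cb (mongoc_client_session_t *session,
                             void *ctx,
                             bson_t **reply,
                             bson_error_t *error)
{
   /* a real callback would run operations here with this session attached to
    * the operation options */
   (void) session;
   (void) ctx;
   (void) reply;
   (void) error;

   return true; /* ask the driver to commit the transaction */
}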
static void
test_find_and_modify (void)
{
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   bson_error_t error;
   bson_iter_t iter;
   bson_iter_t citer;
   bson_t *update;
   bson_t doc = BSON_INITIALIZER;
   bson_t reply;
   mongoc_find_and_modify_opts_t *opts;

   client = test_framework_client_new ();
   ASSERT (client);

   collection = get_test_collection (client, "test_find_and_modify");
   ASSERT (collection);

   BSON_APPEND_INT32 (&doc, "superduper", 77889);

   ASSERT_OR_PRINT (mongoc_collection_insert (
                       collection, MONGOC_INSERT_NONE, &doc, NULL, &error),
                    error);

   update = BCON_NEW ("$set", "{", "superduper", BCON_INT32 (1234), "}");

   opts = mongoc_find_and_modify_opts_new ();
   mongoc_find_and_modify_opts_set_update (opts, update);
   mongoc_find_and_modify_opts_set_fields (opts, tmp_bson ("{'superduper': 1}"));
   mongoc_find_and_modify_opts_set_sort (opts, tmp_bson ("{'superduper': 1}"));
   mongoc_find_and_modify_opts_set_flags (opts, MONGOC_FIND_AND_MODIFY_RETURN_NEW);

   ASSERT_OR_PRINT (mongoc_collection_find_and_modify_with_opts (
                       collection, &doc, opts, &reply, &error),
                    error);

   assert (bson_iter_init_find (&iter, &reply, "value"));
   assert (BSON_ITER_HOLDS_DOCUMENT (&iter));
   assert (bson_iter_recurse (&iter, &citer));
   assert (bson_iter_find (&citer, "superduper"));
   assert (BSON_ITER_HOLDS_INT32 (&citer));
   assert (bson_iter_int32 (&citer) == 1234);

   assert (bson_iter_init_find (&iter, &reply, "lastErrorObject"));
   assert (BSON_ITER_HOLDS_DOCUMENT (&iter));
   assert (bson_iter_recurse (&iter, &citer));
   assert (bson_iter_find (&citer, "updatedExisting"));
   assert (BSON_ITER_HOLDS_BOOL (&citer));
   assert (bson_iter_bool (&citer));

   bson_destroy (&reply);
   bson_destroy (update);

   ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error);

   mongoc_find_and_modify_opts_destroy (opts);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
   bson_destroy (&doc);
}
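/* A minimal sketch, not part of the original tests, showing the same
 * find-and-modify path with extra options attached via
 * mongoc_find_and_modify_opts_append (here a maxTimeMS value chosen only for
 * illustration). The helper name is hypothetical; the caller owns "reply". */
static bool
example_find_and_modify_with_max_time (mongoc_collection_t *collection,
                                       const bson_t *query,
                                       bson_t *update,
                                       bson_t *reply,
                                       bson_error_t *error)
{
   mongoc_find_and_modify_opts_t *opts;
   bool ok;

   opts = mongoc_find_and_modify_opts_new ();
   mongoc_find_and_modify_opts_set_update (opts, update);

   /* append arbitrary extra options as a BSON document */
   mongoc_find_and_modify_opts_append (opts, tmp_bson ("{'maxTimeMS': 100}"));

   ok = mongoc_collection_find_and_modify_with_opts (
      collection, query, opts, reply, error);

   mongoc_find_and_modify_opts_destroy (opts);

   return ok;
}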
static void
test_split_insert (void)
{
   mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT;
   mongoc_write_command_t command;
   mongoc_write_result_t result;
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   bson_oid_t oid;
   bson_t **docs;
   bson_t reply = BSON_INITIALIZER;
   bson_error_t error;
   mongoc_server_stream_t *server_stream;
   int i;
   bool r;

   client = test_framework_client_new ();
   assert (client);

   collection = get_test_collection (client, "test_split_insert");
   assert (collection);

   docs = (bson_t **) bson_malloc (sizeof (bson_t *) * 3000);

   for (i = 0; i < 3000; i++) {
      docs[i] = bson_new ();
      bson_oid_init (&oid, NULL);
      BSON_APPEND_OID (docs[i], "_id", &oid);
   }

   _mongoc_write_result_init (&result);
   _mongoc_write_command_init_insert (
      &command, docs[0], write_flags, ++client->cluster.operation_id, true);

   for (i = 1; i < 3000; i++) {
      _mongoc_write_command_insert_append (&command, docs[i]);
   }

   server_stream = mongoc_cluster_stream_for_writes (&client->cluster, &error);
   ASSERT_OR_PRINT (server_stream, error);

   _mongoc_write_command_execute (&command,
                                  client,
                                  server_stream,
                                  collection->db,
                                  collection->collection,
                                  NULL,
                                  0,
                                  &result);

   r = _mongoc_write_result_complete (
      &result, 2, collection->write_concern, &reply, &error);
   ASSERT_OR_PRINT (r, error);
   assert (result.nInserted == 3000);

   _mongoc_write_command_destroy (&command);
   _mongoc_write_result_destroy (&result);

   ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error);

   for (i = 0; i < 3000; i++) {
      bson_destroy (docs[i]);
   }

   bson_free (docs);
   mongoc_server_stream_cleanup (server_stream);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
}
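/* The test above exercises batch splitting through the private write-command
 * layer. For reference, a minimal sketch of the same scenario through the
 * public bulk API, which splits oversized batches automatically; the helper
 * name is hypothetical and error handling is kept to a single message. */
static void
example_bulk_insert_3000 (mongoc_collection_t *collection)
{
   mongoc_bulk_operation_t *bulk;
   bson_t reply;
   bson_error_t error;
   bson_oid_t oid;
   bson_t *doc;
   int i;

   bulk = mongoc_collection_create_bulk_operation (collection, true, NULL);

   for (i = 0; i < 3000; i++) {
      doc = bson_new ();
      bson_oid_init (&oid, NULL);
      BSON_APPEND_OID (doc, "_id", &oid);
      /* the bulk operation copies the document, so it can be freed here */
      mongoc_bulk_operation_insert (bulk, doc);
      bson_destroy (doc);
   }

   if (!mongoc_bulk_operation_execute (bulk, &reply, &error)) {
      fprintf (stderr, "bulk insert failed: %s\n", error.message);
   }

   bson_destroy (&reply);
   mongoc_bulk_operation_destroy (bulk);
}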
static void
test_bypass_validation (void *context)
{
   mongoc_collection_t *collection;
   bson_t reply = BSON_INITIALIZER;
   mongoc_bulk_operation_t *bulk;
   mongoc_database_t *database;
   mongoc_write_concern_t *wr;
   mongoc_client_t *client;
   bson_error_t error;
   bson_t *options;
   char *collname;
   char *dbname;
   int r;
   int i;

   client = test_framework_client_new ();
   assert (client);

   dbname = gen_collection_name ("dbtest");
   collname = gen_collection_name ("bypass");
   database = mongoc_client_get_database (client, dbname);
   collection = mongoc_database_get_collection (database, collname);
   assert (collection);

   options = tmp_bson (
      "{'validator': {'number': {'$gte': 5}}, 'validationAction': 'error'}");
   ASSERT_OR_PRINT (
      mongoc_database_create_collection (database, collname, options, &error),
      error);

   /* {{{ Default fails validation */
   bulk = mongoc_collection_create_bulk_operation (collection, true, NULL);
   for (i = 0; i < 3; i++) {
      bson_t *doc =
         tmp_bson (bson_strdup_printf ("{'number': 3, 'high': %d }", i));
      mongoc_bulk_operation_insert (bulk, doc);
   }
   r = mongoc_bulk_operation_execute (bulk, &reply, &error);
   ASSERT (!r);
   ASSERT_ERROR_CONTAINS (
      error, MONGOC_ERROR_COMMAND, 121, "Document failed validation");
   mongoc_bulk_operation_destroy (bulk);
   /* }}} */

   /* {{{ bypass_document_validation=false fails validation */
   bulk = mongoc_collection_create_bulk_operation (collection, true, NULL);
   mongoc_bulk_operation_set_bypass_document_validation (bulk, false);
   for (i = 0; i < 3; i++) {
      bson_t *doc =
         tmp_bson (bson_strdup_printf ("{'number': 3, 'high': %d }", i));
      mongoc_bulk_operation_insert (bulk, doc);
   }
   r = mongoc_bulk_operation_execute (bulk, &reply, &error);
   ASSERT (!r);
   ASSERT_ERROR_CONTAINS (
      error, MONGOC_ERROR_COMMAND, 121, "Document failed validation");
   mongoc_bulk_operation_destroy (bulk);
   /* }}} */

   /* {{{ bypass_document_validation=true ignores validation */
   bulk = mongoc_collection_create_bulk_operation (collection, true, NULL);
   mongoc_bulk_operation_set_bypass_document_validation (bulk, true);
   for (i = 0; i < 3; i++) {
      bson_t *doc =
         tmp_bson (bson_strdup_printf ("{'number': 3, 'high': %d }", i));
      mongoc_bulk_operation_insert (bulk, doc);
   }
   r = mongoc_bulk_operation_execute (bulk, &reply, &error);
   ASSERT_OR_PRINT (r, error);
   mongoc_bulk_operation_destroy (bulk);
   /* }}} */

   /* {{{ w=0 and bypass_document_validation set fails */
   bulk = mongoc_collection_create_bulk_operation (collection, true, NULL);
   wr = mongoc_write_concern_new ();
   mongoc_write_concern_set_w (wr, 0);
   mongoc_bulk_operation_set_write_concern (bulk, wr);
   mongoc_bulk_operation_set_bypass_document_validation (bulk, true);
   for (i = 0; i < 3; i++) {
      bson_t *doc =
         tmp_bson (bson_strdup_printf ("{'number': 3, 'high': %d }", i));
      mongoc_bulk_operation_insert (bulk, doc);
   }
   r = mongoc_bulk_operation_execute (bulk, &reply, &error);
   ASSERT_OR_PRINT (!r, error);
   ASSERT_ERROR_CONTAINS (
      error,
      MONGOC_ERROR_COMMAND,
      MONGOC_ERROR_COMMAND_INVALID_ARG,
      "Cannot set bypassDocumentValidation for unacknowledged writes");
   mongoc_bulk_operation_destroy (bulk);
   mongoc_write_concern_destroy (wr);
   /* }}} */

   ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
}
static void
test_exhaust_cursor (bool pooled)
{
   mongoc_stream_t *stream;
   mongoc_write_concern_t *wr;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   mongoc_collection_t *collection;
   mongoc_cursor_t *cursor;
   mongoc_cursor_t *cursor2;
   const bson_t *doc;
   bson_t q;
   bson_t b[10];
   bson_t *bptr[10];
   int i;
   bool r;
   uint32_t hint;
   bson_error_t error;
   bson_oid_t oid;
   int64_t timestamp1;

   if (pooled) {
      pool = test_framework_client_pool_new ();
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_client_new ();
   }
   assert (client);

   collection = get_test_collection (client, "test_exhaust_cursor");
   assert (collection);

   mongoc_collection_drop (collection, &error);

   wr = mongoc_write_concern_new ();
   mongoc_write_concern_set_journal (wr, true);

   /* bulk insert some records to work on */
   {
      bson_init (&q);

      for (i = 0; i < 10; i++) {
         bson_init (&b[i]);
         bson_oid_init (&oid, NULL);
         bson_append_oid (&b[i], "_id", -1, &oid);
         bson_append_int32 (&b[i], "n", -1, i % 2);
         bptr[i] = &b[i];
      }

      BEGIN_IGNORE_DEPRECATIONS;
      ASSERT_OR_PRINT (mongoc_collection_insert_bulk (collection,
                                                      MONGOC_INSERT_NONE,
                                                      (const bson_t **) bptr,
                                                      10,
                                                      wr,
                                                      &error),
                       error);
      END_IGNORE_DEPRECATIONS;
   }

   /* create a couple of cursors */
   {
      cursor = mongoc_collection_find (
         collection, MONGOC_QUERY_EXHAUST, 0, 0, 0, &q, NULL, NULL);

      cursor2 = mongoc_collection_find (
         collection, MONGOC_QUERY_NONE, 0, 0, 0, &q, NULL, NULL);
   }

   /* Read from the exhaust cursor, ensure that we're in exhaust where we
    * should be and ensure that an early destroy properly causes a disconnect
    * */
   {
      r = mongoc_cursor_next (cursor, &doc);
      if (!r) {
         mongoc_cursor_error (cursor, &error);
         fprintf (stderr, "cursor error: %s\n", error.message);
      }
      assert (r);
      assert (doc);
      assert (cursor->in_exhaust);
      assert (client->in_exhaust);

      /* destroy the cursor, make sure a disconnect happened */
      timestamp1 = get_timestamp (client, cursor);
      mongoc_cursor_destroy (cursor);
      assert (!client->in_exhaust);
   }

   /* Grab a new exhaust cursor, then verify that reading from that cursor
    * (putting the client into exhaust), breaks a mid-stream read from a
    * regular cursor */
   {
      cursor = mongoc_collection_find (
         collection, MONGOC_QUERY_EXHAUST, 0, 0, 0, &q, NULL, NULL);

      r = mongoc_cursor_next (cursor2, &doc);
      if (!r) {
         mongoc_cursor_error (cursor2, &error);
         fprintf (stderr, "cursor error: %s\n", error.message);
      }
      assert (r);
      assert (doc);
      assert (timestamp1 < get_timestamp (client, cursor2));

      for (i = 0; i < 5; i++) {
         r = mongoc_cursor_next (cursor2, &doc);
         if (!r) {
            mongoc_cursor_error (cursor2, &error);
            fprintf (stderr, "cursor error: %s\n", error.message);
         }
         assert (r);
         assert (doc);
      }

      r = mongoc_cursor_next (cursor, &doc);
      assert (r);
      assert (doc);

      doc = NULL;
      r = mongoc_cursor_next (cursor2, &doc);
      assert (!r);
      assert (!doc);

      mongoc_cursor_error (cursor2, &error);
      assert (error.domain == MONGOC_ERROR_CLIENT);
      assert (error.code == MONGOC_ERROR_CLIENT_IN_EXHAUST);

      mongoc_cursor_destroy (cursor2);
   }

   /* make sure writes fail as well */
   {
      BEGIN_IGNORE_DEPRECATIONS;
      r = mongoc_collection_insert_bulk (collection,
                                         MONGOC_INSERT_NONE,
                                         (const bson_t **) bptr,
                                         10,
                                         wr,
                                         &error);
      END_IGNORE_DEPRECATIONS;
      assert (!r);
      assert (error.domain == MONGOC_ERROR_CLIENT);
      assert (error.code == MONGOC_ERROR_CLIENT_IN_EXHAUST);
   }

   /* we're still in exhaust.
    *
    * 1. check that we can create a new cursor, as long as we don't read from it
    * 2. fully exhaust the exhaust cursor
    * 3. make sure that we don't disconnect at destroy
    * 4. make sure we can read the cursor we made during the exhaust
    */
   {
      cursor2 = mongoc_collection_find (
         collection, MONGOC_QUERY_NONE, 0, 0, 0, &q, NULL, NULL);

      stream = (mongoc_stream_t *) mongoc_set_get (client->cluster.nodes,
                                                   cursor->hint);
      hint = cursor->hint;

      for (i = 1; i < 10; i++) {
         r = mongoc_cursor_next (cursor, &doc);
         assert (r);
         assert (doc);
      }

      r = mongoc_cursor_next (cursor, &doc);
      assert (!r);
      assert (!doc);

      mongoc_cursor_destroy (cursor);

      assert (stream ==
              (mongoc_stream_t *) mongoc_set_get (client->cluster.nodes, hint));

      r = mongoc_cursor_next (cursor2, &doc);
      assert (r);
      assert (doc);
   }

   bson_destroy (&q);
   for (i = 0; i < 10; i++) {
      bson_destroy (&b[i]);
   }

   ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error);

   mongoc_write_concern_destroy (wr);
   mongoc_cursor_destroy (cursor2);
   mongoc_collection_destroy (collection);

   if (pooled) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }
}
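/* A minimal sketch, separate from the private-API assertions above, of how an
 * application typically consumes an exhaust cursor with the public API:
 * request MONGOC_QUERY_EXHAUST, drain the cursor fully, then check for an
 * error. The helper name is hypothetical. */
static void
example_drain_exhaust_cursor (mongoc_collection_t *collection)
{
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bson_t query = BSON_INITIALIZER;
   bson_error_t error;

   cursor = mongoc_collection_find (
      collection, MONGOC_QUERY_EXHAUST, 0, 0, 0, &query, NULL, NULL);

   while (mongoc_cursor_next (cursor, &doc)) {
      /* process doc; the exhaust stream must be drained before the client can
       * run other operations on this connection */
   }

   if (mongoc_cursor_error (cursor, &error)) {
      fprintf (stderr, "exhaust cursor error: %s\n", error.message);
   }

   mongoc_cursor_destroy (cursor);
   bson_destroy (&query);
}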