static void *
background_mongoc_collection_insert_bulk (void *data)
{
   future_t *future = (future_t *) data;
   future_value_t return_value;

   return_value.type = future_value_bool_type;

   future_value_set_bool (
      &return_value,
      mongoc_collection_insert_bulk (
         future_value_get_mongoc_collection_ptr (future_get_param (future, 0)),
         future_value_get_mongoc_insert_flags_t (future_get_param (future, 1)),
         future_value_get_const_bson_ptr_ptr (future_get_param (future, 2)),
         future_value_get_uint32_t (future_get_param (future, 3)),
         future_value_get_const_mongoc_write_concern_ptr (
            future_get_param (future, 4)),
         future_value_get_bson_error_ptr (future_get_param (future, 5))));

   future_resolve (future, return_value);

   return NULL;
}
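/* A sketch of the matching foreground helper, included for context: it packs
 * the six parameters into a future and hands them to the background thread
 * above, so the caller can later harvest the boolean result (presumably via
 * future_get_bool ()).  This assumes the test suite's future_new () /
 * future_start () API, and the future_value_set_* setters are assumed to
 * mirror the future_value_get_* accessors used above -- verify the exact
 * names against future.h in your checkout before relying on them. */
future_t *
future_collection_insert_bulk (mongoc_collection_t *collection,
                               mongoc_insert_flags_t flags,
                               const bson_t **documents,
                               uint32_t n_documents,
                               const mongoc_write_concern_t *write_concern,
                               bson_error_t *error)
{
   /* a bool-valued future taking six parameters */
   future_t *future = future_new (future_value_bool_type, 6);

   future_value_set_mongoc_collection_ptr (
      future_get_param (future, 0), collection);
   future_value_set_mongoc_insert_flags_t (
      future_get_param (future, 1), flags);
   future_value_set_const_bson_ptr_ptr (
      future_get_param (future, 2), documents);
   future_value_set_uint32_t (
      future_get_param (future, 3), n_documents);
   future_value_set_const_mongoc_write_concern_ptr (
      future_get_param (future, 4), write_concern);
   future_value_set_bson_error_ptr (
      future_get_param (future, 5), error);

   /* run background_mongoc_collection_insert_bulk on its own thread */
   future_start (future, background_mongoc_collection_insert_bulk);

   return future;
}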
static void
test_exhaust_cursor (void)
{
   mongoc_write_concern_t *wr;
   mongoc_client_t *client;
   mongoc_collection_t *collection;
   mongoc_cursor_t *cursor;
   mongoc_cursor_t *cursor2;
   mongoc_stream_t *stream;
   mongoc_cluster_node_t *node;
   const bson_t *doc;
   bson_t q;
   bson_t b[10];
   bson_t *bptr[10];
   int i;
   bool r;
   bson_error_t error;
   bson_oid_t oid;

   client = mongoc_client_new (gTestUri);
   assert (client);

   collection = get_test_collection (client, "test_exhaust_cursor");
   assert (collection);

   mongoc_collection_drop (collection, &error);

   wr = mongoc_write_concern_new ();
   mongoc_write_concern_set_journal (wr, true);

   /* bulk insert some records to work on */
   {
      bson_init (&q);

      for (i = 0; i < 10; i++) {
         bson_init (&b[i]);
         bson_oid_init (&oid, NULL);
         bson_append_oid (&b[i], "_id", -1, &oid);
         bson_append_int32 (&b[i], "n", -1, i % 2);
         bptr[i] = &b[i];
      }

      BEGIN_IGNORE_DEPRECATIONS;
      r = mongoc_collection_insert_bulk (collection, MONGOC_INSERT_NONE,
                                         (const bson_t **) bptr, 10, wr,
                                         &error);
      END_IGNORE_DEPRECATIONS;

      if (!r) {
         MONGOC_WARNING ("Insert bulk failure: %s\n", error.message);
      }
      assert (r);
   }

   /* create a couple of cursors */
   {
      cursor = mongoc_collection_find (collection, MONGOC_QUERY_EXHAUST, 0, 0,
                                       0, &q, NULL, NULL);
      cursor2 = mongoc_collection_find (collection, MONGOC_QUERY_NONE, 0, 0,
                                        0, &q, NULL, NULL);
   }

   /* Read from the exhaust cursor, ensure that we're in exhaust where we
    * should be and ensure that an early destroy properly causes a disconnect
    */
   {
      r = mongoc_cursor_next (cursor, &doc);
      assert (r);
      assert (doc);
      assert (cursor->in_exhaust);
      assert (client->in_exhaust);

      node = &client->cluster.nodes[cursor->hint - 1];
      stream = node->stream;

      mongoc_cursor_destroy (cursor);

      /* make sure a disconnect happened */
      assert (stream != node->stream);
      assert (!client->in_exhaust);
   }

   /* Grab a new exhaust cursor, then verify that reading from that cursor
    * (putting the client into exhaust) breaks a mid-stream read from a
    * regular cursor */
   {
      cursor = mongoc_collection_find (collection, MONGOC_QUERY_EXHAUST, 0, 0,
                                       0, &q, NULL, NULL);

      for (i = 0; i < 5; i++) {
         r = mongoc_cursor_next (cursor2, &doc);
         assert (r);
         assert (doc);
      }

      r = mongoc_cursor_next (cursor, &doc);
      assert (r);
      assert (doc);

      doc = NULL;
      r = mongoc_cursor_next (cursor2, &doc);
      assert (!r);
      assert (!doc);

      mongoc_cursor_error (cursor2, &error);
      assert (error.domain == MONGOC_ERROR_CLIENT);
      assert (error.code == MONGOC_ERROR_CLIENT_IN_EXHAUST);

      mongoc_cursor_destroy (cursor2);
   }

   /* make sure writes fail as well */
   {
      BEGIN_IGNORE_DEPRECATIONS;
      r = mongoc_collection_insert_bulk (collection, MONGOC_INSERT_NONE,
                                         (const bson_t **) bptr, 10, wr,
                                         &error);
      END_IGNORE_DEPRECATIONS;

      assert (!r);
      assert (error.domain == MONGOC_ERROR_CLIENT);
      assert (error.code == MONGOC_ERROR_CLIENT_IN_EXHAUST);
   }

   /* we're still in exhaust.
    *
    * 1. check that we can create a new cursor, as long as we don't read from
    *    it
    * 2. fully exhaust the exhaust cursor
    * 3. make sure that we don't disconnect at destroy
    * 4. make sure we can read the cursor we made during the exhaust
    */
   {
      cursor2 = mongoc_collection_find (collection, MONGOC_QUERY_NONE, 0, 0,
                                        0, &q, NULL, NULL);

      node = &client->cluster.nodes[cursor->hint - 1];
      stream = node->stream;

      for (i = 1; i < 10; i++) {
         r = mongoc_cursor_next (cursor, &doc);
         assert (r);
         assert (doc);
      }

      r = mongoc_cursor_next (cursor, &doc);
      assert (!r);
      assert (!doc);

      mongoc_cursor_destroy (cursor);

      assert (stream == node->stream);

      r = mongoc_cursor_next (cursor2, &doc);
      assert (r);
      assert (doc);
   }

   bson_destroy (&q);
   for (i = 0; i < 10; i++) {
      bson_destroy (&b[i]);
   }

   r = mongoc_collection_drop (collection, &error);
   assert (r);

   mongoc_write_concern_destroy (wr);
   mongoc_cursor_destroy (cursor2);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
}
static void
test_many_return (void)
{
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   mongoc_cursor_t *cursor;
   bson_error_t error;
   const bson_t *doc = NULL;
   bson_oid_t oid;
   bson_t query = BSON_INITIALIZER;
   bson_t **docs;
   bool r;
   int i;

   client = mongoc_client_new (gTestUri);
   ASSERT (client);

   collection = get_test_collection (client, "test_many_return");
   ASSERT (collection);

   docs = bson_malloc (sizeof (bson_t *) * 5000);

   for (i = 0; i < 5000; i++) {
      docs[i] = bson_new ();
      bson_oid_init (&oid, NULL);
      BSON_APPEND_OID (docs[i], "_id", &oid);
   }

   BEGIN_IGNORE_DEPRECATIONS;
   r = mongoc_collection_insert_bulk (collection, MONGOC_INSERT_NONE,
                                      (const bson_t **) docs, 5000, NULL,
                                      &error);
   END_IGNORE_DEPRECATIONS;

   assert (r);

   for (i = 0; i < 5000; i++) {
      bson_destroy (docs[i]);
   }

   bson_free (docs);

   cursor = mongoc_collection_find (collection, MONGOC_QUERY_NONE, 0, 0, 6000,
                                    &query, NULL, NULL);
   assert (cursor);
   bson_destroy (&query);

   i = 0;
   while (mongoc_cursor_next (cursor, &doc)) {
      assert (doc);
      i++;
   }

   assert (i == 5000);

   r = mongoc_cursor_next (cursor, &doc);
   assert (!r);

   mongoc_cursor_destroy (cursor);

   r = mongoc_collection_drop (collection, &error);
   assert (r);

   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
}
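/* test_many_return's read loop treats a false return from mongoc_cursor_next
 * as end-of-stream, which is fine inside a test.  Outside a test, distinguish
 * exhaustion from failure by checking mongoc_cursor_error once iteration
 * stops.  count_all_documents is a hypothetical helper sketching that
 * pattern; it is not part of the original file. */
static int64_t
count_all_documents (mongoc_cursor_t *cursor, bson_error_t *error)
{
   const bson_t *doc;
   int64_t n = 0;

   while (mongoc_cursor_next (cursor, &doc)) {
      n++;   /* each doc is only valid until the next call */
   }

   /* mongoc_cursor_next returns false at end-of-stream and on error alike */
   if (mongoc_cursor_error (cursor, error)) {
      return -1;
   }

   return n;
}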
static void
test_insert_bulk (void)
{
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   bson_context_t *context;
   bson_error_t error;
   bool r;
   bson_oid_t oid;
   unsigned i;
   bson_t q;
   bson_t b[10];
   bson_t *bptr[10];
   int64_t count;

   client = mongoc_client_new (gTestUri);
   ASSERT (client);

   collection = mongoc_client_get_collection (client, "test", "test");
   ASSERT (collection);

   mongoc_collection_drop (collection, &error);

   context = bson_context_new (BSON_CONTEXT_NONE);
   ASSERT (context);

   bson_init (&q);
   bson_append_int32 (&q, "n", -1, 0);

   for (i = 0; i < 10; i++) {
      bson_init (&b[i]);
      bson_oid_init (&oid, context);
      bson_append_oid (&b[i], "_id", -1, &oid);
      bson_append_int32 (&b[i], "n", -1, i % 2);
      bptr[i] = &b[i];
   }

   r = mongoc_collection_insert_bulk (collection, MONGOC_INSERT_NONE,
                                      (const bson_t **) bptr, 10, NULL,
                                      &error);
   if (!r) {
      MONGOC_WARNING ("%s\n", error.message);
   }
   ASSERT (r);

   count = mongoc_collection_count (collection, MONGOC_QUERY_NONE, &q, 0, 0,
                                    NULL, &error);
   ASSERT (count == 5);

   /* re-generate docs 8 and 9 with fresh _ids; docs 0-7 now collide, so an
    * ordered insert aborts on the first duplicate and adds nothing */
   for (i = 8; i < 10; i++) {
      bson_destroy (&b[i]);
      bson_init (&b[i]);
      bson_oid_init (&oid, context);
      bson_append_oid (&b[i], "_id", -1, &oid);
      bson_append_int32 (&b[i], "n", -1, i % 2);
      bptr[i] = &b[i];
   }

   r = mongoc_collection_insert_bulk (collection, MONGOC_INSERT_NONE,
                                      (const bson_t **) bptr, 10, NULL,
                                      &error);
   ASSERT (!r);
   ASSERT (error.code == 11000);   /* duplicate key */

   count = mongoc_collection_count (collection, MONGOC_QUERY_NONE, &q, 0, 0,
                                    NULL, &error);
   ASSERT (count == 5);

   /* with CONTINUE_ON_ERROR the two fresh docs are inserted despite the
    * duplicates; doc 8 has n == 0, so the count rises to 6 */
   r = mongoc_collection_insert_bulk (collection,
                                      MONGOC_INSERT_CONTINUE_ON_ERROR,
                                      (const bson_t **) bptr, 10, NULL,
                                      &error);
   ASSERT (!r);
   ASSERT (error.code == 11000);

   count = mongoc_collection_count (collection, MONGOC_QUERY_NONE, &q, 0, 0,
                                    NULL, &error);
   ASSERT (count == 6);

   bson_destroy (&q);
   for (i = 0; i < 10; i++) {
      bson_destroy (&b[i]);
   }

   mongoc_collection_destroy (collection);
   bson_context_destroy (context);
   mongoc_client_destroy (client);
}
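/* mongoc_collection_insert_bulk is deprecated (hence the
 * BEGIN_IGNORE_DEPRECATIONS guards elsewhere in this file).  A minimal
 * sketch of the same behavior with the bulk write API contemporary with
 * this code: insert_bulk_modern is a hypothetical helper, and passing
 * ordered=false plays the role of MONGOC_INSERT_CONTINUE_ON_ERROR --
 * remaining documents are still attempted after a duplicate-key failure. */
static bool
insert_bulk_modern (mongoc_collection_t *collection,
                    bson_t **docs,
                    int n_docs,
                    bool ordered,
                    bson_error_t *error)
{
   mongoc_bulk_operation_t *bulk;
   bson_t reply;
   uint32_t server_id;
   int i;

   bulk = mongoc_collection_create_bulk_operation (collection, ordered, NULL);

   for (i = 0; i < n_docs; i++) {
      mongoc_bulk_operation_insert (bulk, docs[i]);
   }

   /* returns the server id on success, 0 on failure (error is filled in);
    * reply is initialized either way and must be destroyed */
   server_id = mongoc_bulk_operation_execute (bulk, &reply, error);

   bson_destroy (&reply);
   mongoc_bulk_operation_destroy (bulk);

   return server_id != 0;
}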
static void
test_exhaust_cursor (bool pooled)
{
   mongoc_stream_t *stream;
   mongoc_write_concern_t *wr;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   mongoc_collection_t *collection;
   mongoc_cursor_t *cursor;
   mongoc_cursor_t *cursor2;
   const bson_t *doc;
   bson_t q;
   bson_t b[10];
   bson_t *bptr[10];
   int i;
   bool r;
   uint32_t hint;
   bson_error_t error;
   bson_oid_t oid;
   int64_t timestamp1;

   if (pooled) {
      pool = test_framework_client_pool_new ();
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_client_new ();
   }
   assert (client);

   collection = get_test_collection (client, "test_exhaust_cursor");
   assert (collection);

   mongoc_collection_drop (collection, &error);

   wr = mongoc_write_concern_new ();
   mongoc_write_concern_set_journal (wr, true);

   /* bulk insert some records to work on */
   {
      bson_init (&q);

      for (i = 0; i < 10; i++) {
         bson_init (&b[i]);
         bson_oid_init (&oid, NULL);
         bson_append_oid (&b[i], "_id", -1, &oid);
         bson_append_int32 (&b[i], "n", -1, i % 2);
         bptr[i] = &b[i];
      }

      BEGIN_IGNORE_DEPRECATIONS;
      ASSERT_OR_PRINT (mongoc_collection_insert_bulk (
                          collection, MONGOC_INSERT_NONE,
                          (const bson_t **) bptr, 10, wr, &error),
                       error);
      END_IGNORE_DEPRECATIONS;
   }

   /* create a couple of cursors */
   {
      cursor = mongoc_collection_find (collection, MONGOC_QUERY_EXHAUST, 0, 0,
                                       0, &q, NULL, NULL);
      cursor2 = mongoc_collection_find (collection, MONGOC_QUERY_NONE, 0, 0,
                                        0, &q, NULL, NULL);
   }

   /* Read from the exhaust cursor, ensure that we're in exhaust where we
    * should be and ensure that an early destroy properly causes a disconnect
    */
   {
      r = mongoc_cursor_next (cursor, &doc);
      if (!r) {
         mongoc_cursor_error (cursor, &error);
         fprintf (stderr, "cursor error: %s\n", error.message);
      }
      assert (r);
      assert (doc);
      assert (cursor->in_exhaust);
      assert (client->in_exhaust);

      /* destroy the cursor, make sure a disconnect happened */
      timestamp1 = get_timestamp (client, cursor);
      mongoc_cursor_destroy (cursor);
      assert (!client->in_exhaust);
   }

   /* Grab a new exhaust cursor, then verify that reading from that cursor
    * (putting the client into exhaust) breaks a mid-stream read from a
    * regular cursor */
   {
      cursor = mongoc_collection_find (collection, MONGOC_QUERY_EXHAUST, 0, 0,
                                       0, &q, NULL, NULL);

      r = mongoc_cursor_next (cursor2, &doc);
      if (!r) {
         mongoc_cursor_error (cursor2, &error);
         fprintf (stderr, "cursor error: %s\n", error.message);
      }
      assert (r);
      assert (doc);
      assert (timestamp1 < get_timestamp (client, cursor2));

      for (i = 0; i < 5; i++) {
         r = mongoc_cursor_next (cursor2, &doc);
         if (!r) {
            mongoc_cursor_error (cursor2, &error);
            fprintf (stderr, "cursor error: %s\n", error.message);
         }
         assert (r);
         assert (doc);
      }

      r = mongoc_cursor_next (cursor, &doc);
      assert (r);
      assert (doc);

      doc = NULL;
      r = mongoc_cursor_next (cursor2, &doc);
      assert (!r);
      assert (!doc);

      mongoc_cursor_error (cursor2, &error);
      assert (error.domain == MONGOC_ERROR_CLIENT);
      assert (error.code == MONGOC_ERROR_CLIENT_IN_EXHAUST);

      mongoc_cursor_destroy (cursor2);
   }

   /* make sure writes fail as well */
   {
      BEGIN_IGNORE_DEPRECATIONS;
      r = mongoc_collection_insert_bulk (collection, MONGOC_INSERT_NONE,
                                         (const bson_t **) bptr, 10, wr,
                                         &error);
      END_IGNORE_DEPRECATIONS;

      assert (!r);
      assert (error.domain == MONGOC_ERROR_CLIENT);
      assert (error.code == MONGOC_ERROR_CLIENT_IN_EXHAUST);
   }

   /* we're still in exhaust.
    *
    * 1. check that we can create a new cursor, as long as we don't read from
    *    it
    * 2. fully exhaust the exhaust cursor
    * 3. make sure that we don't disconnect at destroy
    * 4. make sure we can read the cursor we made during the exhaust
    */
   {
      cursor2 = mongoc_collection_find (collection, MONGOC_QUERY_NONE, 0, 0,
                                        0, &q, NULL, NULL);

      stream = (mongoc_stream_t *) mongoc_set_get (client->cluster.nodes,
                                                   cursor->hint);
      hint = cursor->hint;

      for (i = 1; i < 10; i++) {
         r = mongoc_cursor_next (cursor, &doc);
         assert (r);
         assert (doc);
      }

      r = mongoc_cursor_next (cursor, &doc);
      assert (!r);
      assert (!doc);

      mongoc_cursor_destroy (cursor);

      assert (stream == (mongoc_stream_t *) mongoc_set_get (
                           client->cluster.nodes, hint));

      r = mongoc_cursor_next (cursor2, &doc);
      assert (r);
      assert (doc);
   }

   bson_destroy (&q);
   for (i = 0; i < 10; i++) {
      bson_destroy (&b[i]);
   }

   ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error);

   mongoc_write_concern_destroy (wr);
   mongoc_cursor_destroy (cursor2);
   mongoc_collection_destroy (collection);

   if (pooled) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }
}
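/* Because this version of test_exhaust_cursor takes a bool, the test harness
 * needs void (void) entry points.  A sketch of the usual
 * wrapper-plus-registration pattern using the driver's TestSuite_Add; the
 * install function name and suite paths here are illustrative, not taken
 * from the original file. */
static void
test_exhaust_cursor_single (void)
{
   test_exhaust_cursor (false);
}

static void
test_exhaust_cursor_pooled (void)
{
   test_exhaust_cursor (true);
}

void
test_exhaust_install (TestSuite *suite)
{
   TestSuite_Add (suite, "/Client/exhaust_cursor/single",
                  test_exhaust_cursor_single);
   TestSuite_Add (suite, "/Client/exhaust_cursor/pooled",
                  test_exhaust_cursor_pooled);
}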