static bool
_mongoc_cursor_cursorid_get_more (mongoc_cursor_t *cursor)
{
   mongoc_cursor_cursorid_t *cid;
   mongoc_server_stream_t *server_stream;
   bson_t command;
   bool ret;

   ENTRY;

   cid = (mongoc_cursor_cursorid_t *) cursor->iface_data;
   BSON_ASSERT (cid);

   server_stream = _mongoc_cursor_fetch_stream (cursor);

   if (!server_stream) {
      RETURN (false);
   }

   if (_use_find_command (cursor, server_stream)) {
      if (!_mongoc_cursor_prepare_getmore_command (cursor, &command)) {
         mongoc_server_stream_cleanup (server_stream);
         RETURN (false);
      }

      ret = _mongoc_cursor_cursorid_refresh_from_command (cursor, &command);
      bson_destroy (&command);
   } else {
      ret = _mongoc_cursor_op_getmore (cursor, server_stream);
      cid->in_reader = ret;
   }

   mongoc_server_stream_cleanup (server_stream);

   RETURN (ret);
}
static void
test_invalid_write_concern (void)
{
   mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT;
   mongoc_write_command_t command;
   mongoc_write_result_t result;
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   mongoc_write_concern_t *write_concern;
   mongoc_server_stream_t *server_stream;
   bson_t *doc;
   bson_t reply = BSON_INITIALIZER;
   bson_error_t error;
   bool r;

   client = test_framework_client_new ();
   assert (client);

   collection = get_test_collection (client, "test_invalid_write_concern");
   assert (collection);

   write_concern = mongoc_write_concern_new ();
   assert (write_concern);
   mongoc_write_concern_set_w (write_concern, 0);
   mongoc_write_concern_set_journal (write_concern, true);
   assert (!mongoc_write_concern_is_valid (write_concern));

   doc = BCON_NEW ("_id", BCON_INT32 (0));

   _mongoc_write_command_init_insert (
      &command, doc, write_flags, ++client->cluster.operation_id, true);
   _mongoc_write_result_init (&result);
   server_stream = mongoc_cluster_stream_for_writes (&client->cluster, &error);
   ASSERT_OR_PRINT (server_stream, error);
   _mongoc_write_command_execute (&command,
                                  client,
                                  server_stream,
                                  collection->db,
                                  collection->collection,
                                  write_concern,
                                  0,
                                  &result);

   r = _mongoc_write_result_complete (
      &result, 2, collection->write_concern, &reply, &error);

   assert (!r);
   ASSERT_CMPINT (error.domain, ==, MONGOC_ERROR_COMMAND);
   ASSERT_CMPINT (error.code, ==, MONGOC_ERROR_COMMAND_INVALID_ARG);

   _mongoc_write_command_destroy (&command);
   _mongoc_write_result_destroy (&result);
   bson_destroy (&reply);

   bson_destroy (doc);
   mongoc_server_stream_cleanup (server_stream);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
   mongoc_write_concern_destroy (write_concern);
}
/**
 * mongoc_client_command_simple:
 * @client: A mongoc_client_t.
 * @db_name: The database name, such as "admin".
 * @command: The command to execute.
 * @read_prefs: The read preferences or NULL.
 * @reply: A location for the reply document or NULL.
 * @error: A location for the error, or NULL.
 *
 * This wrapper around mongoc_client_command() aims to make it simpler to
 * run a command and check the result.
 *
 * false is returned if the command could not be delivered or if its
 * execution failed. For example, a command that returns {'ok': 0} will
 * cause this function to return false.
 *
 * To allow the caller to disambiguate between command execution failure and
 * failure to send the command, @reply is always set if non-NULL. The caller
 * should release it with bson_destroy().
 *
 * Returns: true if the command executed and resulted in success. Otherwise
 *   false and @error is set. @reply is always set, either to the resulting
 *   document or an empty bson document upon failure.
 */
bool
mongoc_client_command_simple (mongoc_client_t *client,
                              const char *db_name,
                              const bson_t *command,
                              const mongoc_read_prefs_t *read_prefs,
                              bson_t *reply,
                              bson_error_t *error)
{
   mongoc_cluster_t *cluster;
   mongoc_server_stream_t *server_stream;
   mongoc_apply_read_prefs_result_t result = READ_PREFS_RESULT_INIT;
   bool ret = false;

   ENTRY;

   BSON_ASSERT (client);
   BSON_ASSERT (db_name);
   BSON_ASSERT (command);

   cluster = &client->cluster;
   server_stream = mongoc_cluster_stream_for_reads (cluster, read_prefs, error);

   if (!server_stream) {
      if (reply) {
         bson_init (reply);
      }

      GOTO (done);
   }

   apply_read_preferences (
      read_prefs, server_stream, command, MONGOC_QUERY_NONE, &result);

   ret = mongoc_cluster_run_command (cluster,
                                     server_stream->stream,
                                     server_stream->sd->id,
                                     result.flags,
                                     db_name,
                                     result.query_with_read_prefs,
                                     reply,
                                     error);

done:
   mongoc_server_stream_cleanup (server_stream);
   apply_read_prefs_result_cleanup (&result);

   RETURN (ret);
}
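/* A hedged usage sketch of mongoc_client_command_simple: run {"ping": 1}
 * against the "admin" database and print the reply. The URI and the
 * main() scaffolding are illustrative assumptions, not part of the function
 * above. Note that @reply is initialized on both the success and failure
 * paths, so bson_destroy() on it is always safe. */
#include <mongoc.h>
#include <stdio.h>

int
main (void)
{
   mongoc_client_t *client;
   bson_t *command;
   bson_t reply;
   bson_error_t error;
   char *str;

   mongoc_init ();
   client = mongoc_client_new ("mongodb://localhost:27017"); /* assumed URI */
   command = BCON_NEW ("ping", BCON_INT32 (1));

   if (mongoc_client_command_simple (
          client, "admin", command, NULL /* read prefs */, &reply, &error)) {
      str = bson_as_json (&reply, NULL);
      printf ("%s\n", str);
      bson_free (str);
   } else {
      fprintf (stderr, "ping failed: %s\n", error.message);
   }

   bson_destroy (&reply); /* safe: always initialized by the call */
   bson_destroy (command);
   mongoc_client_destroy (client);
   mongoc_cleanup ();

   return 0;
}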
static getmore_type_t
_getmore_type (mongoc_cursor_t *cursor)
{
   mongoc_server_stream_t *server_stream;
   bool use_cmd;
   data_cmd_t *data = (data_cmd_t *) cursor->impl.data;

   if (data->getmore_type != UNKNOWN) {
      return data->getmore_type;
   }

   server_stream = _mongoc_cursor_fetch_stream (cursor);
   if (!server_stream) {
      return UNKNOWN;
   }

   /* use the "getMore" command if the server supports the "find" command
    * (wire version >= WIRE_VERSION_FIND_CMD) and this is not an exhaust
    * cursor; otherwise fall back to OP_GETMORE */
   use_cmd = server_stream->sd->max_wire_version >= WIRE_VERSION_FIND_CMD &&
             !_mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_EXHAUST);
   data->getmore_type = use_cmd ? GETMORE_CMD : OP_GETMORE;
   mongoc_server_stream_cleanup (server_stream);
   return data->getmore_type;
}
void
_mongoc_client_kill_cursor (mongoc_client_t *client,
                            uint32_t server_id,
                            int64_t cursor_id,
                            const char *db,
                            const char *collection)
{
   mongoc_server_stream_t *server_stream;

   ENTRY;

   BSON_ASSERT (client);
   BSON_ASSERT (cursor_id);

   /* don't attempt reconnect if server unavailable, and ignore errors */
   server_stream = mongoc_cluster_stream_for_server (
      &client->cluster, server_id, false /* reconnect_ok */, NULL /* error */);

   if (!server_stream) {
      EXIT;
   }

   if (db && collection &&
       server_stream->sd->max_wire_version >= WIRE_VERSION_KILLCURSORS_CMD) {
      _mongoc_client_killcursors_command (
         &client->cluster, server_stream, cursor_id, db, collection);
   } else {
      _mongoc_client_op_killcursors (
         &client->cluster, server_stream, cursor_id);
   }

   mongoc_server_stream_cleanup (server_stream);

   EXIT;
}
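/* For reference, a sketch of the "killCursors" command document that the
 * command path above ultimately sends (shape per the MongoDB command
 * reference; the helper's exact construction may differ):
 *
 *    { "killCursors": "<collection>", "cursors": [ <cursor_id> ] }
 *
 * Built with BCON, this would look roughly like the following fragment,
 * using the function's own `collection` and `cursor_id` parameters: */
bson_t *kc_cmd = BCON_NEW ("killCursors",
                           BCON_UTF8 (collection),
                           "cursors",
                           "[",
                           BCON_INT64 (cursor_id),
                           "]");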
uint32_t
mongoc_bulk_operation_execute (mongoc_bulk_operation_t *bulk, /* IN */
                               bson_t *reply,                 /* OUT */
                               bson_error_t *error)           /* OUT */
{
   mongoc_cluster_t *cluster;
   mongoc_write_command_t *command;
   mongoc_server_stream_t *server_stream;
   bool ret;
   uint32_t offset = 0;
   int i;

   ENTRY;

   BSON_ASSERT (bulk);

   if (reply) {
      bson_init (reply);
   }

   if (!bulk->client) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "mongoc_bulk_operation_execute() requires a client "
                      "and one has not been set.");
      RETURN (false);
   }

   cluster = &bulk->client->cluster;

   if (bulk->executed) {
      _mongoc_write_result_destroy (&bulk->result);
      _mongoc_write_result_init (&bulk->result);
   }

   bulk->executed = true;

   if (!bulk->database) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "mongoc_bulk_operation_execute() requires a database "
                      "and one has not been set.");
      RETURN (false);
   } else if (!bulk->collection) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "mongoc_bulk_operation_execute() requires a collection "
                      "and one has not been set.");
      RETURN (false);
   }

   /* error stored by functions like mongoc_bulk_operation_insert that
    * can't report errors immediately */
   if (bulk->result.error.domain) {
      if (error) {
         memcpy (error, &bulk->result.error, sizeof (bson_error_t));
      }

      RETURN (false);
   }

   if (!bulk->commands.len) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "Cannot do an empty bulk write");
      RETURN (false);
   }

   for (i = 0; i < bulk->commands.len; i++) {
      if (bulk->server_id) {
         server_stream = mongoc_cluster_stream_for_server (
            cluster, bulk->server_id, true /* reconnect_ok */, error);
      } else {
         server_stream = mongoc_cluster_stream_for_writes (cluster, error);
      }

      if (!server_stream) {
         RETURN (false);
      }

      command =
         &_mongoc_array_index (&bulk->commands, mongoc_write_command_t, i);

      _mongoc_write_command_execute (command,
                                     bulk->client,
                                     server_stream,
                                     bulk->database,
                                     bulk->collection,
                                     bulk->write_concern,
                                     offset,
                                     bulk->session,
                                     &bulk->result);

      bulk->server_id = server_stream->sd->id;

      if (bulk->result.failed &&
          (bulk->flags.ordered || bulk->result.must_stop)) {
         mongoc_server_stream_cleanup (server_stream);
         GOTO (cleanup);
      }

      offset += command->n_documents;
      mongoc_server_stream_cleanup (server_stream);
   }

cleanup:
   ret = MONGOC_WRITE_RESULT_COMPLETE (&bulk->result,
                                       bulk->client->error_api_version,
                                       bulk->write_concern,
                                       MONGOC_ERROR_COMMAND /* err domain */,
                                       reply,
                                       error);

   RETURN (ret ? bulk->server_id : 0);
}
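/* A minimal usage sketch of the public bulk API that drives the function
 * above. The collection handle and document are assumed to exist; this is
 * illustrative, not code from this file. A zero return signals failure;
 * otherwise the server id used for the operations is returned. */
static void
example_bulk_insert (mongoc_collection_t *collection, const bson_t *doc)
{
   mongoc_bulk_operation_t *bulk;
   bson_t reply;
   bson_error_t error;
   uint32_t server_id;

   bulk = mongoc_collection_create_bulk_operation (
      collection, true /* ordered */, NULL /* write concern */);
   mongoc_bulk_operation_insert (bulk, doc);

   server_id = mongoc_bulk_operation_execute (bulk, &reply, &error);
   if (!server_id) {
      fprintf (stderr, "bulk write failed: %s\n", error.message);
   }

   bson_destroy (&reply);
   mongoc_bulk_operation_destroy (bulk);
}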
static void
_mongoc_write_opmsg (mongoc_write_command_t *command,
                     mongoc_client_t *client,
                     mongoc_server_stream_t *server_stream,
                     const char *database,
                     const char *collection,
                     const mongoc_write_concern_t *write_concern,
                     uint32_t index_offset,
                     mongoc_client_session_t *cs,
                     mongoc_write_result_t *result,
                     bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   bson_t cmd;
   bson_t reply;
   bool ret = false;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   int32_t max_document_count;
   uint32_t header;
   uint32_t payload_batch_size = 0;
   uint32_t payload_total_offset = 0;
   bool ship_it = false;
   int document_count = 0;
   int32_t len;
   mongoc_server_stream_t *retry_server_stream = NULL;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

/* MongoDB has an extra allowance when updating a 16mb document, as the
 * update operators would otherwise overflow the 16mb object limit */
#define BSON_OBJECT_ALLOWANCE (16 * 1024)
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);
   max_document_count =
      mongoc_server_stream_max_write_batch_size (server_stream);

   bson_init (&cmd);
   _mongoc_write_command_init (&cmd, command, collection);
   mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd);
   parts.assembled.operation_id = command->operation_id;
   parts.is_write_command = true;

   if (!mongoc_cmd_parts_set_write_concern (
          &parts, write_concern, server_stream->sd->max_wire_version, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   if (parts.assembled.is_acknowledged) {
      mongoc_cmd_parts_set_session (&parts, cs);
   }

   /* Write commands that include multi-document operations are not
    * retryable. Set this explicitly so that mongoc_cmd_parts_assemble does
    * not need to inspect the command body later. */
   parts.allow_txn_number =
      (command->flags.has_multi_write || !parts.assembled.is_acknowledged)
         ? MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_NO
         : MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES;

   BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts));
   if (!mongoc_cmd_parts_append_opts (
          &parts, &iter, server_stream->sd->max_wire_version, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   /*
    * OP_MSG header == 16 byte
    * + 4 bytes flagBits
    * + 1 byte payload type = 1
    * + 1 byte payload type = 2
    * + 4 byte size of payload
    * == 26 bytes opcode overhead
    * + X Full command document {insert: "test", writeConcern: {...}}
    * + Y command identifier ("documents", "deletes", "updates") ( + \0)
    */
   header =
      26 + parts.assembled.command->len + gCommandFieldLens[command->type] + 1;

   do {
      memcpy (&len,
              command->payload.data + payload_batch_size + payload_total_offset,
              4);
      len = BSON_UINT32_FROM_LE (len);

      if (len > max_bson_obj_size + BSON_OBJECT_ALLOWANCE) {
         /* Quit if the document is too large */
         _mongoc_write_command_too_large_error (
            error, index_offset, len, max_bson_obj_size);
         result->failed = true;
         break;

      } else if ((payload_batch_size + header) + len <= max_msg_size) {
         /* The current batch is still under the maximum batch size in bytes */
         payload_batch_size += len;

         /* If this document filled the maximum document count */
         if (++document_count == max_document_count) {
            ship_it = true;
            /* If this document is the last document we have */
         } else if (payload_batch_size + payload_total_offset ==
                    command->payload.len) {
            ship_it = true;
         } else {
            ship_it = false;
         }
      } else {
         ship_it = true;
      }

      if (ship_it) {
         bool is_retryable = parts.is_retryable_write;
         mongoc_write_err_type_t error_type;

         /* Seek past the document offset we have already sent */
         parts.assembled.payload = command->payload.data + payload_total_offset;
         /* Only send the documents up to this size */
         parts.assembled.payload_size = payload_batch_size;
         parts.assembled.payload_identifier = gCommandFields[command->type];

         /* increment the transaction number for the first attempt of each
          * retryable write command */
         if (is_retryable) {
            bson_iter_t txn_number_iter;
            BSON_ASSERT (bson_iter_init_find (
               &txn_number_iter, parts.assembled.command, "txnNumber"));
            bson_iter_overwrite_int64 (
               &txn_number_iter,
               ++parts.assembled.session->server_session->txn_number);
         }
      retry:
         ret = mongoc_cluster_run_command_monitored (
            &client->cluster, &parts.assembled, &reply, error);

         /* Add this batch size so we skip these documents next time */
         payload_total_offset += payload_batch_size;
         payload_batch_size = 0;

         /* If a retryable error is encountered and the write is retryable,
          * select a new writable stream and retry. If server selection fails
          * or the selected server does not support retryable writes, fall
          * through and allow the original error to be reported. */
         error_type = _mongoc_write_error_get_type (ret, error, &reply);
         if (is_retryable && error_type == MONGOC_WRITE_ERR_RETRY) {
            bson_error_t ignored_error;

            /* each write command may be retried at most once */
            is_retryable = false;

            if (retry_server_stream) {
               mongoc_server_stream_cleanup (retry_server_stream);
            }

            retry_server_stream = mongoc_cluster_stream_for_writes (
               &client->cluster, cs, NULL, &ignored_error);

            if (retry_server_stream &&
                retry_server_stream->sd->max_wire_version >=
                   WIRE_VERSION_RETRY_WRITES) {
               parts.assembled.server_stream = retry_server_stream;
               bson_destroy (&reply);
               GOTO (retry);
            }
         }

         if (!ret) {
            result->failed = true;
            result->must_stop = true;
         }

         /* Result merge needs to know the absolute index for a document
          * so it can rewrite the error message which contains the relative
          * document index per batch */
         _mongoc_write_result_merge (result, command, &reply, index_offset);
         index_offset += document_count;
         document_count = 0;
         bson_destroy (&reply);
      }
      /* While we have more documents to write */
   } while (payload_total_offset < command->payload.len);

   bson_destroy (&cmd);
   mongoc_cmd_parts_cleanup (&parts);

   if (retry_server_stream) {
      mongoc_server_stream_cleanup (retry_server_stream);
   }

   if (ret) {
      /* if a retry succeeded, clear the initial error */
      memset (&result->error, 0, sizeof (bson_error_t));
   }

   EXIT;
}
static void
test_split_insert (void)
{
   mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT;
   mongoc_write_command_t command;
   mongoc_write_result_t result;
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   bson_oid_t oid;
   bson_t **docs;
   bson_t reply = BSON_INITIALIZER;
   bson_error_t error;
   mongoc_server_stream_t *server_stream;
   int i;
   bool r;

   client = test_framework_client_new ();
   assert (client);

   collection = get_test_collection (client, "test_split_insert");
   assert (collection);

   docs = (bson_t **) bson_malloc (sizeof (bson_t *) * 3000);
   for (i = 0; i < 3000; i++) {
      docs[i] = bson_new ();
      bson_oid_init (&oid, NULL);
      BSON_APPEND_OID (docs[i], "_id", &oid);
   }

   _mongoc_write_result_init (&result);

   _mongoc_write_command_init_insert (
      &command, docs[0], write_flags, ++client->cluster.operation_id, true);

   for (i = 1; i < 3000; i++) {
      _mongoc_write_command_insert_append (&command, docs[i]);
   }

   server_stream = mongoc_cluster_stream_for_writes (&client->cluster, &error);
   ASSERT_OR_PRINT (server_stream, error);
   _mongoc_write_command_execute (&command,
                                  client,
                                  server_stream,
                                  collection->db,
                                  collection->collection,
                                  NULL,
                                  0,
                                  &result);

   r = _mongoc_write_result_complete (
      &result, 2, collection->write_concern, &reply, &error);
   ASSERT_OR_PRINT (r, error);
   assert (result.nInserted == 3000);

   _mongoc_write_command_destroy (&command);
   _mongoc_write_result_destroy (&result);
   bson_destroy (&reply);

   ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error);

   for (i = 0; i < 3000; i++) {
      bson_destroy (docs[i]);
   }

   bson_free (docs);
   mongoc_server_stream_cleanup (server_stream);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
}
uint32_t
mongoc_bulk_operation_execute (mongoc_bulk_operation_t *bulk, /* IN */
                               bson_t *reply,                 /* OUT */
                               bson_error_t *error)           /* OUT */
{
   mongoc_cluster_t *cluster;
   mongoc_write_command_t *command;
   mongoc_server_stream_t *server_stream;
   bool ret;
   uint32_t offset = 0;
   int i;

   ENTRY;

   BSON_ASSERT (bulk);

   if (bulk->executed) {
      _mongoc_write_result_destroy (&bulk->result);
   }

   _mongoc_write_result_init (&bulk->result);

   bulk->executed = true;

   if (!bulk->client) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "mongoc_bulk_operation_execute() requires a client "
                      "and one has not been set.");
      RETURN (false);
   } else if (!bulk->database) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "mongoc_bulk_operation_execute() requires a database "
                      "and one has not been set.");
      RETURN (false);
   } else if (!bulk->collection) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "mongoc_bulk_operation_execute() requires a collection "
                      "and one has not been set.");
      RETURN (false);
   }

   /* only dereference the client after verifying it has been set */
   cluster = &bulk->client->cluster;

   if (reply) {
      bson_init (reply);
   }

   if (!bulk->commands.len) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "Cannot do an empty bulk write");
      RETURN (false);
   }

   if (bulk->server_id) {
      server_stream = mongoc_cluster_stream_for_server (
         cluster, bulk->server_id, true /* reconnect_ok */, error);
   } else {
      server_stream = mongoc_cluster_stream_for_writes (cluster, error);
   }

   if (!server_stream) {
      RETURN (false);
   }

   for (i = 0; i < bulk->commands.len; i++) {
      command =
         &_mongoc_array_index (&bulk->commands, mongoc_write_command_t, i);

      _mongoc_write_command_execute (command,
                                     bulk->client,
                                     server_stream,
                                     bulk->database,
                                     bulk->collection,
                                     bulk->write_concern,
                                     offset,
                                     &bulk->result);

      bulk->server_id = command->server_id;

      if (bulk->result.failed && bulk->flags.ordered) {
         GOTO (cleanup);
      }

      offset += command->n_documents;
   }

cleanup:
   ret = _mongoc_write_result_complete (&bulk->result, reply, error);
   mongoc_server_stream_cleanup (server_stream);

   RETURN (ret ? bulk->server_id : 0);
}
bool
_mongoc_cluster_auth_node_cyrus (mongoc_cluster_t *cluster,
                                 mongoc_stream_t *stream,
                                 mongoc_server_description_t *sd,
                                 bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   uint32_t buflen = 0;
   mongoc_cyrus_t sasl;
   bson_iter_t iter;
   bool ret = false;
   const char *tmpstr;
   uint8_t buf[4096] = {0};
   bson_t cmd;
   bson_t reply;
   int conv_id = 0;
   mongoc_server_stream_t *server_stream;

   BSON_ASSERT (cluster);
   BSON_ASSERT (stream);

   if (!_mongoc_cyrus_new_from_cluster (
          &sasl, cluster, stream, sd->host.host, error)) {
      return false;
   }

   for (;;) {
      mongoc_cmd_parts_init (
         &parts, cluster->client, "$external", MONGOC_QUERY_SLAVE_OK, &cmd);

      if (!_mongoc_cyrus_step (
             &sasl, buf, buflen, buf, sizeof buf, &buflen, error)) {
         goto failure;
      }

      bson_init (&cmd);

      if (sasl.step == 1) {
         _mongoc_cluster_build_sasl_start (
            &cmd, sasl.credentials.mechanism, (const char *) buf, buflen);
      } else {
         _mongoc_cluster_build_sasl_continue (
            &cmd, conv_id, (const char *) buf, buflen);
      }

      TRACE ("SASL: authenticating (step %d)", sasl.step);

      server_stream = _mongoc_cluster_create_server_stream (
         cluster->client->topology, sd->id, stream, error);

      if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
         mongoc_server_stream_cleanup (server_stream);
         bson_destroy (&cmd);
         goto failure;
      }

      if (!mongoc_cluster_run_command_private (
             cluster, &parts.assembled, &reply, error)) {
         mongoc_server_stream_cleanup (server_stream);
         bson_destroy (&cmd);
         bson_destroy (&reply);
         goto failure;
      }

      mongoc_server_stream_cleanup (server_stream);
      bson_destroy (&cmd);

      if (bson_iter_init_find (&iter, &reply, "done") &&
          bson_iter_as_bool (&iter)) {
         bson_destroy (&reply);
         break;
      }

      conv_id = _mongoc_cluster_get_conversation_id (&reply);

      if (!bson_iter_init_find (&iter, &reply, "payload") ||
          !BSON_ITER_HOLDS_UTF8 (&iter)) {
         MONGOC_DEBUG ("SASL: authentication failed");
         bson_destroy (&reply);
         bson_set_error (error,
                         MONGOC_ERROR_CLIENT,
                         MONGOC_ERROR_CLIENT_AUTHENTICATE,
                         "Received invalid SASL reply from MongoDB server.");
         goto failure;
      }

      tmpstr = bson_iter_utf8 (&iter, &buflen);

      if (buflen > sizeof buf) {
         bson_set_error (error,
                         MONGOC_ERROR_CLIENT,
                         MONGOC_ERROR_CLIENT_AUTHENTICATE,
                         "SASL reply from MongoDB is too large.");
         bson_destroy (&reply);
         goto failure;
      }

      memcpy (buf, tmpstr, buflen);
      bson_destroy (&reply);
      mongoc_cmd_parts_cleanup (&parts);
   }

   TRACE ("%s", "SASL: authenticated");
   ret = true;

failure:
   _mongoc_cyrus_destroy (&sasl);
   mongoc_cmd_parts_cleanup (&parts);

   return ret;
}
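/* For orientation, a sketch of the conversation documents the
 * _mongoc_cluster_build_sasl_* helpers above presumably build (shapes per
 * the MongoDB SASL protocol; the helpers' exact fields may differ). The
 * loop sends saslStart once, then saslContinue until the server replies
 * with {"done": true}:
 *
 *    saslStart:    { "saslStart": 1, "mechanism": "<mech>",
 *                    "payload": "<data>" }
 *    saslContinue: { "saslContinue": 1, "conversationId": <conv_id>,
 *                    "payload": "<data>" }
 *
 * As a BCON fragment, with `mechanism` and `payload` as hypothetical
 * stand-ins for the values taken from the Cyrus step: */
bson_t *start_cmd = BCON_NEW ("saslStart",
                              BCON_INT32 (1),
                              "mechanism",
                              BCON_UTF8 (mechanism),
                              "payload",
                              BCON_UTF8 (payload));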