static void
_mongoc_write_opmsg (mongoc_write_command_t *command,
                     mongoc_client_t *client,
                     mongoc_server_stream_t *server_stream,
                     const char *database,
                     const char *collection,
                     const mongoc_write_concern_t *write_concern,
                     uint32_t index_offset,
                     mongoc_client_session_t *cs,
                     mongoc_write_result_t *result,
                     bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   bson_t cmd;
   bson_t reply;
   bool ret = false;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   int32_t max_document_count;
   uint32_t header;
   uint32_t payload_batch_size = 0;
   uint32_t payload_total_offset = 0;
   bool ship_it = false;
   int document_count = 0;
   int32_t len;
   mongoc_server_stream_t *retry_server_stream = NULL;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

/* MongoDB grants an extra allowance beyond maxBsonObjectSize so that a full
 * 16 MiB document can still be updated: the update operators would otherwise
 * push the command over the 16 MiB object limit. */
#define BSON_OBJECT_ALLOWANCE (16 * 1024)
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);
   max_document_count =
      mongoc_server_stream_max_write_batch_size (server_stream);

   bson_init (&cmd);
   _mongoc_write_command_init (&cmd, command, collection);
   mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd);
   parts.assembled.operation_id = command->operation_id;
   parts.is_write_command = true;
   if (!mongoc_cmd_parts_set_write_concern (
          &parts, write_concern, server_stream->sd->max_wire_version, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   if (parts.assembled.is_acknowledged) {
      mongoc_cmd_parts_set_session (&parts, cs);
   }

   /* Write commands that include multi-document operations are not
    * retryable. Set this explicitly so that mongoc_cmd_parts_assemble does
    * not need to inspect the command body later. */
   parts.allow_txn_number =
      (command->flags.has_multi_write || !parts.assembled.is_acknowledged)
         ? MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_NO
         : MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES;

   BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts));
   if (!mongoc_cmd_parts_append_opts (
          &parts, &iter, server_stream->sd->max_wire_version, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   /*
    * OP_MSG header == 16 bytes
    * + 4 bytes flagBits
    * + 1 byte payload type = 1
    * + 1 byte payload type = 2
    * + 4-byte size of payload
    * == 26 bytes opcode overhead
    * + X Full command document {insert: "test", writeConcern: {...}}
    * + Y command identifier ("documents", "deletes", "updates") ( + \0)
    */
   header =
      26 + parts.assembled.command->len + gCommandFieldLens[command->type] + 1;

   do {
      memcpy (&len,
              command->payload.data + payload_batch_size +
                 payload_total_offset,
              4);
      len = BSON_UINT32_FROM_LE (len);

      if (len > max_bson_obj_size + BSON_OBJECT_ALLOWANCE) {
         /* Quit if the document is too large */
         _mongoc_write_command_too_large_error (
            error, index_offset, len, max_bson_obj_size);
         result->failed = true;
         break;

      } else if ((payload_batch_size + header) + len <= max_msg_size) {
         /* The current batch is still under the max message size in bytes */
         payload_batch_size += len;

         /* If this document filled the maximum document count */
         if (++document_count == max_document_count) {
            ship_it = true;
            /* If this document is the last document we have */
         } else if (payload_batch_size + payload_total_offset ==
                    command->payload.len) {
            ship_it = true;
         } else {
            ship_it = false;
         }
      } else {
         ship_it = true;
      }

      if (ship_it) {
         bool is_retryable = parts.is_retryable_write;
         mongoc_write_err_type_t error_type;

         /* Seek past the document offset we have already sent */
         parts.assembled.payload =
            command->payload.data + payload_total_offset;
         /* Only send the documents up to this size */
         parts.assembled.payload_size = payload_batch_size;
         parts.assembled.payload_identifier = gCommandFields[command->type];

         /* increment the transaction number for the first attempt of each
          * retryable write command */
         if (is_retryable) {
            bson_iter_t txn_number_iter;
            BSON_ASSERT (bson_iter_init_find (
               &txn_number_iter, parts.assembled.command, "txnNumber"));
            bson_iter_overwrite_int64 (
               &txn_number_iter,
               ++parts.assembled.session->server_session->txn_number);
         }
      retry:
         ret = mongoc_cluster_run_command_monitored (
            &client->cluster, &parts.assembled, &reply, error);

         /* Add this batch size so we skip these documents next time */
         payload_total_offset += payload_batch_size;
         payload_batch_size = 0;

         /* If a retryable error is encountered and the write is retryable,
          * select a new writable stream and retry. If server selection fails
          * or the selected server does not support retryable writes, fall
          * through and allow the original error to be reported. */
         error_type = _mongoc_write_error_get_type (ret, error, &reply);
         if (is_retryable && error_type == MONGOC_WRITE_ERR_RETRY) {
            bson_error_t ignored_error;

            /* each write command may be retried at most once */
            is_retryable = false;

            if (retry_server_stream) {
               mongoc_server_stream_cleanup (retry_server_stream);
            }

            retry_server_stream = mongoc_cluster_stream_for_writes (
               &client->cluster, cs, NULL, &ignored_error);

            if (retry_server_stream &&
                retry_server_stream->sd->max_wire_version >=
                   WIRE_VERSION_RETRY_WRITES) {
               parts.assembled.server_stream = retry_server_stream;
               bson_destroy (&reply);
               GOTO (retry);
            }
         }

         if (!ret) {
            result->failed = true;
            result->must_stop = true;
         }

         /* Result merge needs to know the absolute index for a document so
          * it can rewrite the error message, which contains the relative
          * document index within the batch */
         _mongoc_write_result_merge (result, command, &reply, index_offset);
         index_offset += document_count;
         document_count = 0;
         bson_destroy (&reply);
      }
      /* While we have more documents to write */
   } while (payload_total_offset < command->payload.len);

   bson_destroy (&cmd);
   mongoc_cmd_parts_cleanup (&parts);

   if (retry_server_stream) {
      mongoc_server_stream_cleanup (retry_server_stream);
   }

   if (ret) {
      /* if a retry succeeded, clear the initial error */
      memset (&result->error, 0, sizeof (bson_error_t));
   }

   EXIT;
}
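
/* Illustrative sketch, excluded from the build: the batch-splitting rule
 * used by _mongoc_write_opmsg above, in miniature. A document joins the
 * current batch only while the fixed 26-byte OP_MSG overhead, the assembled
 * command document, the payload identifier (plus its NUL), and the batched
 * bytes all fit within the server's max message size. The function and
 * parameter names below are hypothetical, for exposition only. */
#if 0
static bool
example_fits_in_batch (uint32_t assembled_command_len, /* {"insert": ...} */
                       uint32_t identifier_len, /* "documents" etc. */
                       uint32_t batch_size,     /* bytes already batched */
                       int32_t doc_len,         /* next document's length */
                       int32_t max_msg_size)
{
   /* Mirrors: header = 26 + command->len + identifier_len + 1 (for '\0') */
   uint32_t header = 26u + assembled_command_len + identifier_len + 1u;

   return (batch_size + header) + (uint32_t) doc_len <=
          (uint32_t) max_msg_size;
}
#endif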
static void
_mongoc_write_opquery (mongoc_write_command_t *command,
                       mongoc_client_t *client,
                       mongoc_server_stream_t *server_stream,
                       const char *database,
                       const char *collection,
                       const mongoc_write_concern_t *write_concern,
                       uint32_t offset,
                       mongoc_write_result_t *result,
                       bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   const char *key;
   uint32_t len = 0;
   bson_t ar;
   bson_t cmd;
   bson_t reply;
   char str[16];
   bool has_more;
   bool ret = false;
   uint32_t i;
   int32_t max_bson_obj_size;
   int32_t max_write_batch_size;
   uint32_t overhead;
   uint32_t key_len;
   int data_offset = 0;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   bson_init (&cmd);
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_write_batch_size =
      mongoc_server_stream_max_write_batch_size (server_stream);

again:
   has_more = false;
   i = 0;

   _mongoc_write_command_init (&cmd, command, collection);

   /* 1 byte to specify array type, 1 byte for field name's null terminator */
   overhead = cmd.len + 2 + gCommandFieldLens[command->type];

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);

   bson_append_array_begin (&cmd,
                            gCommandFields[command->type],
                            gCommandFieldLens[command->type],
                            &ar);

   while ((bson = bson_reader_read (reader, &eof))) {
      key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str);
      len = bson->len;
      /* 1 byte to specify document type, 1 byte for key's null terminator */
      if (_mongoc_write_command_will_overflow (overhead,
                                               key_len + len + 2 + ar.len,
                                               i,
                                               max_bson_obj_size,
                                               max_write_batch_size)) {
         has_more = true;
         break;
      }
      BSON_APPEND_DOCUMENT (&ar, key, bson);
      data_offset += len;
      i++;
   }

   bson_append_array_end (&cmd, &ar);

   if (!i) {
      _mongoc_write_command_too_large_error (error, i, len, max_bson_obj_size);
      result->failed = true;
      result->must_stop = true;
      ret = false;
      if (bson) {
         data_offset += len;
      }
   } else {
      mongoc_cmd_parts_init (
         &parts, client, database, MONGOC_QUERY_NONE, &cmd);
      parts.is_write_command = true;
      parts.assembled.operation_id = command->operation_id;
      if (!mongoc_cmd_parts_set_write_concern (
             &parts,
             write_concern,
             server_stream->sd->max_wire_version,
             error)) {
         bson_reader_destroy (reader);
         bson_destroy (&cmd);
         mongoc_cmd_parts_cleanup (&parts);
         EXIT;
      }

      BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts));
      if (!mongoc_cmd_parts_append_opts (
             &parts, &iter, server_stream->sd->max_wire_version, error)) {
         bson_reader_destroy (reader);
         bson_destroy (&cmd);
         mongoc_cmd_parts_cleanup (&parts);
         EXIT;
      }

      ret = mongoc_cmd_parts_assemble (&parts, server_stream, error);
      if (ret) {
         ret = mongoc_cluster_run_command_monitored (
            &client->cluster, &parts.assembled, &reply, error);
      } else {
         /* assembling failed */
         result->must_stop = true;
         bson_init (&reply);
      }

      if (!ret) {
         result->failed = true;
         if (bson_empty (&reply)) {
            /* assembling failed, or a network error running the command */
            result->must_stop = true;
         }
      }

      _mongoc_write_result_merge (result, command, &reply, offset);
      offset += i;
      bson_destroy (&reply);
      mongoc_cmd_parts_cleanup (&parts);
   }

   bson_reader_destroy (reader);

   if (has_more && (ret || !command->flags.ordered) && !result->must_stop) {
      bson_reinit (&cmd);
      GOTO (again);
   }

   bson_destroy (&cmd);
   EXIT;
}
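
/* Illustrative sketch, excluded from the build: how the loop above embeds
 * the batched documents in the command body with libbson. The wire format
 * represents BSON arrays as documents keyed "0", "1", ..., which is what
 * bson_uint32_to_string produces. The function name and the fixed
 * "insert"/"documents" fields are hypothetical stand-ins; the real code
 * selects them from gCommandFields by command type. */
#if 0
static void
example_build_insert_command (bson_t *cmd,
                              const bson_t **docs,
                              uint32_t n_docs)
{
   bson_t ar;
   const char *key;
   char str[16];
   uint32_t i;

   BSON_APPEND_UTF8 (cmd, "insert", "test");
   bson_append_array_begin (cmd, "documents", -1, &ar);
   for (i = 0; i < n_docs; i++) {
      /* yields "0", "1", ... as required for BSON array keys */
      bson_uint32_to_string (i, &key, str, sizeof str);
      BSON_APPEND_DOCUMENT (&ar, key, docs[i]);
   }
   bson_append_array_end (cmd, &ar);
}
#endif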
bool
_mongoc_cluster_auth_node_cyrus (mongoc_cluster_t *cluster,
                                 mongoc_stream_t *stream,
                                 mongoc_server_description_t *sd,
                                 bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   uint32_t buflen = 0;
   mongoc_cyrus_t sasl;
   bson_iter_t iter;
   bool ret = false;
   const char *tmpstr;
   uint8_t buf[4096] = {0};
   bson_t cmd;
   bson_t reply;
   int conv_id = 0;
   mongoc_server_stream_t *server_stream;

   BSON_ASSERT (cluster);
   BSON_ASSERT (stream);

   if (!_mongoc_cyrus_new_from_cluster (
          &sasl, cluster, stream, sd->host.host, error)) {
      return false;
   }

   for (;;) {
      /* parts only records the pointer to cmd here; cmd itself is
       * initialized after the SASL step below */
      mongoc_cmd_parts_init (
         &parts, cluster->client, "$external", MONGOC_QUERY_SLAVE_OK, &cmd);

      if (!_mongoc_cyrus_step (
             &sasl, buf, buflen, buf, sizeof buf, &buflen, error)) {
         goto failure;
      }

      bson_init (&cmd);

      if (sasl.step == 1) {
         _mongoc_cluster_build_sasl_start (
            &cmd, sasl.credentials.mechanism, (const char *) buf, buflen);
      } else {
         _mongoc_cluster_build_sasl_continue (
            &cmd, conv_id, (const char *) buf, buflen);
      }

      TRACE ("SASL: authenticating (step %d)", sasl.step);

      server_stream = _mongoc_cluster_create_server_stream (
         cluster->client->topology, sd->id, stream, error);

      if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
         mongoc_server_stream_cleanup (server_stream);
         bson_destroy (&cmd);
         goto failure;
      }

      if (!mongoc_cluster_run_command_private (
             cluster, &parts.assembled, &reply, error)) {
         mongoc_server_stream_cleanup (server_stream);
         bson_destroy (&cmd);
         bson_destroy (&reply);
         goto failure;
      }

      mongoc_server_stream_cleanup (server_stream);
      bson_destroy (&cmd);

      if (bson_iter_init_find (&iter, &reply, "done") &&
          bson_iter_as_bool (&iter)) {
         bson_destroy (&reply);
         break;
      }

      conv_id = _mongoc_cluster_get_conversation_id (&reply);

      if (!bson_iter_init_find (&iter, &reply, "payload") ||
          !BSON_ITER_HOLDS_UTF8 (&iter)) {
         MONGOC_DEBUG ("SASL: authentication failed");
         bson_destroy (&reply);
         bson_set_error (error,
                         MONGOC_ERROR_CLIENT,
                         MONGOC_ERROR_CLIENT_AUTHENTICATE,
                         "Received invalid SASL reply from MongoDB server.");
         goto failure;
      }

      tmpstr = bson_iter_utf8 (&iter, &buflen);

      if (buflen > sizeof buf) {
         bson_set_error (error,
                         MONGOC_ERROR_CLIENT,
                         MONGOC_ERROR_CLIENT_AUTHENTICATE,
                         "SASL reply from MongoDB is too large.");
         bson_destroy (&reply);
         goto failure;
      }

      memcpy (buf, tmpstr, buflen);
      bson_destroy (&reply);
      mongoc_cmd_parts_cleanup (&parts);
   }

   TRACE ("%s", "SASL: authenticated");

   ret = true;

failure:
   _mongoc_cyrus_destroy (&sasl);
   mongoc_cmd_parts_cleanup (&parts);

   return ret;
}
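
/* Illustrative sketch, excluded from the build: the shape of the SASL
 * conversation that the loop above drives. The first round sends saslStart
 * with the mechanism and the client's first payload; each following round
 * sends saslContinue with the server's conversationId, until the server
 * replies with done: true. Field values are placeholders, and the payload
 * encoding here is an assumption for exposition; the real command bodies
 * are built by _mongoc_cluster_build_sasl_start/_continue. */
#if 0
static void
example_build_sasl_commands (bson_t *start, bson_t *cont, int conv_id)
{
   /* step 1: { saslStart: 1, mechanism: "...", payload: "..." } */
   BSON_APPEND_INT32 (start, "saslStart", 1);
   BSON_APPEND_UTF8 (start, "mechanism", "GSSAPI");
   BSON_APPEND_UTF8 (start, "payload", "<client-first-message>");

   /* later steps: { saslContinue: 1, conversationId: <id>, payload: "..." } */
   BSON_APPEND_INT32 (cont, "saslContinue", 1);
   BSON_APPEND_INT32 (cont, "conversationId", conv_id);
   BSON_APPEND_UTF8 (cont, "payload", "<client-response>");
}
#endif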