/*
 * Send an insert write command to the server as one or more legacy
 * OP_INSERT wire-protocol messages, batching as many documents as fit
 * under the server's max message size (or one per message when bulk
 * OP_INSERT is disallowed).  If the write concern requires it, a
 * getLastError reply is read after each batch and merged into @result.
 *
 * Parameters:
 *   command       - must be a MONGOC_WRITE_COMMAND_INSERT command.
 *   client        - client whose cluster is used to send/receive.
 *   server_stream - stream to the selected server.
 *   database, collection - used to build the "db.coll" namespace.
 *   write_concern - determines whether a GLE round-trip is performed.
 *   offset        - absolute index of the first document, used so merged
 *                   errors report document indexes relative to the whole
 *                   operation rather than this batch.
 *   result        - accumulates per-batch outcomes; result->failed is set
 *                   on send/receive failure.
 *   error         - out: details of the first failure.
 *
 * Returns nothing; failure is reported via @result and @error.
 */
static void
_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     const mongoc_write_concern_t *write_concern,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   uint32_t current_offset;
   mongoc_iovec_t *iov;
   const uint8_t *data;
   mongoc_rpc_t rpc;
   bson_iter_t iter;
   uint32_t len;
   bson_t *gle = NULL;
   uint32_t size = 0;
   bool has_more;
   char ns [MONGOC_NAMESPACE_MAX + 1];
   bool r;
   uint32_t n_docs_in_batch;
   uint32_t idx = 0; /* index of the current document within this command */
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly; /* true: send one document per OP_INSERT message */

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   current_offset = offset;

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   r = bson_iter_init (&iter, command->documents);

   if (!r) {
      /* command->documents is built by this driver; iteration over it
       * failing indicates internal corruption, not a user error. */
      BSON_ASSERT (false);
      EXIT;
   }

   if (!command->n_documents || !bson_iter_next (&iter)) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   /* one iovec per document is the worst case (a single full batch) */
   iov = (mongoc_iovec_t *)bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   /* running message size: header + flags (4) + "db.coll\0" namespace */
   size = (uint32_t)(sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 +
                     strlen (collection) + 1);

   /* accumulate documents into iov[] until the batch is full, a too-large
    * document stops an ordered insert, or the documents run out */
   do {
      BSON_ASSERT (BSON_ITER_HOLDS_DOCUMENT (&iter));
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx < command->n_documents);

      bson_iter_document (&iter, &len, &data);

      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);

      if (len > max_bson_obj_size) {
         /* document is too large */
         bson_t write_err_doc = BSON_INITIALIZER;

         too_large_error (error, idx, len, max_bson_obj_size, &write_err_doc);

         _mongoc_write_result_merge_legacy (
            result,
            command,
            &write_err_doc,
            MONGOC_ERROR_COLLECTION_INSERT_FAILED,
            offset + idx);

         bson_destroy (&write_err_doc);

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
         /* unordered: skip this document and keep going (idx++ below) */
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - len)) {
         /* batch is full, send it and then start the next batch.
          * NOTE(review): max_msg_size - len mixes int32_t and uint32_t;
          * this relies on len <= max_msg_size (guaranteed here only
          * because len <= max_bson_obj_size) — confirm max_bson_obj_size
          * never exceeds max_msg_size. */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) data;
         iov[n_docs_in_batch].iov_len = len;
         size += len;
         n_docs_in_batch++;
      }

      idx++;
   } while (bson_iter_next (&iter));

   if (n_docs_in_batch) {
      /* header fields are filled in by the cluster layer before sending */
      rpc.insert.msg_len = 0;
      rpc.insert.request_id = 0;
      rpc.insert.response_to = 0;
      rpc.insert.opcode = MONGOC_OPCODE_INSERT;
      /* unordered inserts map to the legacy continue-on-error flag */
      rpc.insert.flags = ( (command->flags.ordered) ? MONGOC_INSERT_NONE
                                                    : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      if (!mongoc_cluster_sendv_to_server (
             &client->cluster, &rpc, 1, server_stream, write_concern, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      if (_mongoc_write_concern_needs_gle (write_concern)) {
         bool err = false;
         bson_iter_t citer;

         /* acknowledged write: read the getLastError reply for this batch */
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            GOTO (cleanup);
         }

         err = (bson_iter_init_find (&citer, gle, "err") &&
                bson_iter_as_bool (&citer));

         /*
          * Overwrite the "n" field since it will be zero. Otherwise, our
          * merge_legacy code will not know how many we tried in this batch.
          */
         if (!err && bson_iter_init_find (&citer, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&citer) && !bson_iter_int32 (&citer)) {
            bson_iter_overwrite_int32 (&citer, n_docs_in_batch);
         }
      }
   }

cleanup:
   if (gle) {
      /* merge this batch's GLE at the offset where the batch started,
       * then advance the offset for the next batch */
      _mongoc_write_result_merge_legacy (
         result,
         command,
         gle,
         MONGOC_ERROR_COLLECTION_INSERT_FAILED,
         current_offset);
      current_offset = offset + idx;
      bson_destroy (gle);
      gle = NULL;
   }

   if (has_more) {
      /* more documents remain: build and send the next batch */
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
/*
 * Execute a write command (insert/update/delete) over OP_MSG, splitting
 * command->payload into batches that respect the server's max message
 * size and max write batch size.  Each batch is sent as a type-1 payload
 * section; an acknowledged, single-document write may be retried once on
 * a retryable error against a newly selected server.
 *
 * Parameters:
 *   command       - write command whose payload holds the concatenated
 *                   BSON documents to send.
 *   client        - client whose cluster runs the command.
 *   server_stream - stream to the initially selected server.
 *   database, collection - target namespace.
 *   write_concern - applied to the assembled command.
 *   index_offset  - absolute index of the first document, used so merged
 *                   server errors report absolute document indexes.
 *   cs            - optional client session (attached only for
 *                   acknowledged writes).
 *   result        - accumulates replies; failed/must_stop set on error.
 *   error         - out: details of the first failure.
 */
static void
_mongoc_write_opmsg (mongoc_write_command_t *command,
                     mongoc_client_t *client,
                     mongoc_server_stream_t *server_stream,
                     const char *database,
                     const char *collection,
                     const mongoc_write_concern_t *write_concern,
                     uint32_t index_offset,
                     mongoc_client_session_t *cs,
                     mongoc_write_result_t *result,
                     bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   bson_t cmd;
   bson_t reply;
   bool ret = false;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   int32_t max_document_count;
   uint32_t header;               /* fixed per-message overhead in bytes */
   uint32_t payload_batch_size = 0;   /* bytes accumulated for this batch */
   uint32_t payload_total_offset = 0; /* bytes of payload already sent */
   bool ship_it = false;          /* true when the current batch must be sent */
   int document_count = 0;        /* documents accumulated in this batch */
   int32_t len;
   mongoc_server_stream_t *retry_server_stream = NULL;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

/* MongoDB has a extra allowance to allow updating 16mb document,
 * as the update operators would otherwise overflow the 16mb object limit */
#define BSON_OBJECT_ALLOWANCE (16 * 1024)
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);
   max_document_count =
      mongoc_server_stream_max_write_batch_size (server_stream);

   bson_init (&cmd);
   /* build the command body, e.g. {insert: "coll"} */
   _mongoc_write_command_init (&cmd, command, collection);
   mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd);
   parts.assembled.operation_id = command->operation_id;
   parts.is_write_command = true;
   if (!mongoc_cmd_parts_set_write_concern (
          &parts, write_concern, server_stream->sd->max_wire_version, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   /* sessions are only attached to acknowledged writes */
   if (parts.assembled.is_acknowledged) {
      mongoc_cmd_parts_set_session (&parts, cs);
   }

   /* Write commands that include multi-document operations are not retryable.
    * Set this explicitly so that mongoc_cmd_parts_assemble does not need to
    * inspect the command body later. */
   parts.allow_txn_number =
      (command->flags.has_multi_write || !parts.assembled.is_acknowledged)
         ? MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_NO
         : MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES;

   BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts));
   if (!mongoc_cmd_parts_append_opts (
          &parts, &iter, server_stream->sd->max_wire_version, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   /*
    * OP_MSG header == 16 byte
    * + 4 bytes flagBits
    * + 1 byte payload type = 1
    * + 1 byte payload type = 2
    * + 4 byte size of payload
    * == 26 bytes opcode overhead
    * + X Full command document {insert: "test", writeConcern: {...}}
    * + Y command identifier ("documents", "deletes", "updates") ( + \0)
    */
   header =
      26 + parts.assembled.command->len + gCommandFieldLens[command->type] + 1;

   do {
      /* read the little-endian int32 length prefix of the next document */
      memcpy (&len,
              command->payload.data + payload_batch_size +
                 payload_total_offset,
              4);
      len = BSON_UINT32_FROM_LE (len);

      if (len > max_bson_obj_size + BSON_OBJECT_ALLOWANCE) {
         /* Quit if the document is too large */
         _mongoc_write_command_too_large_error (
            error, index_offset, len, max_bson_obj_size);
         result->failed = true;
         break;

      } else if ((payload_batch_size + header) + len <= max_msg_size) {
         /* The current batch is still under max batch size in bytes */
         payload_batch_size += len;

         /* If this document filled the maximum document count */
         if (++document_count == max_document_count) {
            ship_it = true;
            /* If this document is the last document we have */
         } else if (payload_batch_size + payload_total_offset ==
                    command->payload.len) {
            ship_it = true;
         } else {
            ship_it = false;
         }
      } else {
         /* document doesn't fit: send what we have; it will be re-read as
          * the first document of the next batch */
         ship_it = true;
      }

      if (ship_it) {
         bool is_retryable = parts.is_retryable_write;
         mongoc_write_err_type_t error_type;

         /* Seek past the document offset we have already sent */
         parts.assembled.payload = command->payload.data + payload_total_offset;
         /* Only send the documents up to this size */
         parts.assembled.payload_size = payload_batch_size;
         parts.assembled.payload_identifier = gCommandFields[command->type];

         /* increment the transaction number for the first attempt of each
          * retryable write command */
         if (is_retryable) {
            bson_iter_t txn_number_iter;
            BSON_ASSERT (bson_iter_init_find (
               &txn_number_iter, parts.assembled.command, "txnNumber"));
            bson_iter_overwrite_int64 (
               &txn_number_iter,
               ++parts.assembled.session->server_session->txn_number);
         }
      retry:
         ret = mongoc_cluster_run_command_monitored (
            &client->cluster, &parts.assembled, &reply, error);

         /* Add this batch size so we skip these documents next time */
         payload_total_offset += payload_batch_size;
         payload_batch_size = 0;

         /* If a retryable error is encountered and the write is retryable,
          * select a new writable stream and retry. If server selection fails or
          * the selected server does not support retryable writes, fall through
          * and allow the original error to be reported. */
         error_type = _mongoc_write_error_get_type (ret, error, &reply);
         if (is_retryable && error_type == MONGOC_WRITE_ERR_RETRY) {
            bson_error_t ignored_error;

            /* each write command may be retried at most once */
            is_retryable = false;

            if (retry_server_stream) {
               mongoc_server_stream_cleanup (retry_server_stream);
            }

            retry_server_stream = mongoc_cluster_stream_for_writes (
               &client->cluster, cs, NULL, &ignored_error);

            if (retry_server_stream &&
                retry_server_stream->sd->max_wire_version >=
                   WIRE_VERSION_RETRY_WRITES) {
               parts.assembled.server_stream = retry_server_stream;
               bson_destroy (&reply);
               GOTO (retry);
            }
         }

         if (!ret) {
            result->failed = true;
            result->must_stop = true;
         }

         /* Result merge needs to know the absolute index for a document
          * so it can rewrite the error message which contains the relative
          * document index per batch */
         _mongoc_write_result_merge (result, command, &reply, index_offset);
         index_offset += document_count;
         document_count = 0;
         bson_destroy (&reply);
      }
      /* While we have more documents to write */
   } while (payload_total_offset < command->payload.len);

   bson_destroy (&cmd);
   mongoc_cmd_parts_cleanup (&parts);

   if (retry_server_stream) {
      mongoc_server_stream_cleanup (retry_server_stream);
   }

   if (ret) {
      /* if a retry succeeded, clear the initial error */
      memset (&result->error, 0, sizeof (bson_error_t));
   }

   EXIT;
}
/*
 * Send an insert as legacy OP_INSERT wire-protocol messages, batching
 * documents from command->payload (concatenated BSON) up to the server's
 * max message size, or one per message when bulk OP_INSERT is disallowed.
 * Emits command-monitoring started/succeeded events for each batch.
 *
 * Unlike the OP_MSG path, no reply is read here; failures are reported
 * only for send errors.  NOTE(review): this appears to assume an
 * unacknowledged write concern — confirm against the caller.
 *
 * Parameters:
 *   command       - must be a MONGOC_WRITE_COMMAND_INSERT command.
 *   client        - client whose cluster sends the messages.
 *   server_stream - stream to the selected server.
 *   database, collection - used to build the "db.coll" namespace.
 *   offset        - absolute index of the first document (used by the
 *                   too-large error path).
 *   result        - result->failed is set on failure.
 *   error         - out: details of the first failure.
 */
void
_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   mongoc_iovec_t *iov;
   mongoc_rpc_t rpc;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   uint32_t n_docs_in_batch;
   uint32_t request_id = 0;
   uint32_t idx = 0;    /* index of the current document within the command */
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;         /* true: send one document per OP_INSERT message */
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;
   int data_offset = 0; /* byte offset into command->payload already consumed */

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   /* one iovec per document is the worst case (a single full batch) */
   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   /* running message size: header + flags (4) + "db.coll\0" namespace */
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) +
                      1 + strlen (collection) + 1);

   /* iterate the remaining payload bytes as a sequence of BSON documents */
   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);

   while ((bson = bson_reader_read (reader, &eof))) {
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx <= command->n_documents);

      if (bson->len > max_bson_obj_size) {
         /* document is too large */
         _mongoc_write_command_too_large_error (
            error, idx, bson->len, max_bson_obj_size);

         /* skip past the oversized document in the payload */
         data_offset += bson->len;

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
         /* unordered: continue with the next document (idx++ below) */
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - bson->len)) {
         /* batch is full, send it and then start the next batch.
          * NOTE(review): max_msg_size - bson->len mixes int32_t and
          * uint32_t; safe only while bson->len <= max_msg_size — confirm
          * max_bson_obj_size never exceeds max_msg_size. */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
         iov[n_docs_in_batch].iov_len = bson->len;
         size += bson->len;
         n_docs_in_batch++;
         data_offset += bson->len;
      }

      idx++;
   }
   bson_reader_destroy (reader);

   if (n_docs_in_batch) {
      request_id = ++client->cluster.request_id;

      /* msg_len is filled in by the cluster layer before sending */
      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_INSERT;
      /* unordered inserts map to the legacy continue-on-error flag */
      rpc.insert.flags = ((command->flags.ordered)
                             ? MONGOC_INSERT_NONE
                             : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      /* fire the command-monitoring "started" event for this batch */
      _mongoc_monitor_legacy_write (
         client, command, database, collection, server_stream, request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      /* fire "succeeded" with this batch's duration, then restart the
       * clock for the next batch */
      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              server_stream,
                                              request_id);
      started = bson_get_monotonic_time ();
   }

cleanup:

   if (has_more) {
      /* more documents remain: build and send the next batch */
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}