/*
 * _mongoc_write_command_update_legacy --
 *
 *       Validates each update document, then sends one OP_UPDATE message
 *       per update. With an acknowledged write concern, a getLastError
 *       reply is read after each message and merged into "result",
 *       including the CDRIVER-372 workaround that fills in "upserted"
 *       _ids omitted by servers older than 2.6.
 */
void
_mongoc_write_command_update_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   mongoc_rpc_t rpc;
   uint32_t request_id = 0;
   bson_iter_t subiter, subsubiter;
   bson_t doc;
   bool has_update, has_selector, is_upsert;
   bson_t update, selector;
   bson_t *gle = NULL;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   int32_t affected = 0;
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
                 BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   while ((bson = bson_reader_read (reader, &eof))) {
      if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         bson_init_static (&doc, data, len);

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter)[0] != '$') &&
             !bson_validate (
                &doc, (bson_validate_flags_t) vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            bson_reader_destroy (reader);
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         bson_reader_destroy (reader);
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   bson_reader_destroy (reader);
   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   while ((bson = bson_reader_read (reader, &eof))) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      has_update = false;
      has_selector = false;
      is_upsert = false;

      bson_iter_init (&subiter, bson);
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size, NULL);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.update = data;
            bson_init_static (&update, data, len);
            has_update = true;
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size, NULL);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.selector = data;
            bson_init_static (&selector, data, len);
            has_selector = true;
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
            is_upsert = true;
         }
      }

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         if (bson_iter_init_find (&subiter, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&subiter)) {
            affected = bson_iter_int32 (&subiter);
         }

         /*
          * CDRIVER-372:
          *
          * Versions of MongoDB before 2.6 don't return the _id for an
          * upsert if _id is not an ObjectId.
          */
         if (is_upsert && affected &&
             !bson_iter_init_find (&subiter, gle, "upserted") &&
             bson_iter_init_find (&subiter, gle, "updatedExisting") &&
             BSON_ITER_HOLDS_BOOL (&subiter) && !bson_iter_bool (&subiter)) {
            if (has_update && bson_iter_init_find (&subiter, &update, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            } else if (has_selector &&
                       bson_iter_init_find (&subiter, &selector, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            }
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_UPDATE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      started = bson_get_monotonic_time ();
   }

   bson_reader_destroy (reader);
}
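/*
 * _mongoc_write_command_insert_legacy --
 *
 *       Sends the insert as one or more OP_INSERT messages, packing as
 *       many documents into each message as the server's max message
 *       size allows (or one per message when bulk OP_INSERT is
 *       disallowed). With an acknowledged write concern, a getLastError
 *       reply is read after each batch and merged into "result".
 */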
void
_mongoc_write_command_insert_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   uint32_t current_offset;
   mongoc_iovec_t *iov;
   mongoc_rpc_t rpc;
   bson_t *gle = NULL;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   uint32_t n_docs_in_batch;
   uint32_t request_id = 0;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;
   int data_offset = 0;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   started = bson_get_monotonic_time ();
   current_offset = offset;

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) +
                      1 + strlen (collection) + 1);

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);
   while ((bson = bson_reader_read (reader, &eof))) {
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx <= command->n_documents);

      if (bson->len > max_bson_obj_size) {
         /* document is too large */
         bson_t write_err_doc = BSON_INITIALIZER;

         _mongoc_write_command_too_large_error (
            error, idx, bson->len, max_bson_obj_size, &write_err_doc);

         _mongoc_write_result_merge_legacy (
            result,
            command,
            &write_err_doc,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_INSERT_FAILED,
            offset + idx);

         bson_destroy (&write_err_doc);
         data_offset += bson->len;

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - bson->len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
         iov[n_docs_in_batch].iov_len = bson->len;
         size += bson->len;
         n_docs_in_batch++;
         data_offset += bson->len;
      }

      idx++;
   }
   bson_reader_destroy (reader);

   if (n_docs_in_batch) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags =
         ((command->flags.ordered) ? MONGOC_INSERT_NONE
                                   : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         bool err = false;
         bson_iter_t citer;

         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            GOTO (cleanup);
         }

         err = (bson_iter_init_find (&citer, gle, "err") &&
                bson_iter_as_bool (&citer));

         /*
          * Overwrite the "n" field since it will be zero. Otherwise, our
          * merge_legacy code will not know how many we tried in this batch.
          */
         if (!err && bson_iter_init_find (&citer, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&citer) && !bson_iter_int32 (&citer)) {
            bson_iter_overwrite_int32 (&citer, n_docs_in_batch);
         }
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);
      started = bson_get_monotonic_time ();
   }

cleanup:
   if (gle) {
      _mongoc_write_result_merge_legacy (result,
                                         command,
                                         gle,
                                         client->error_api_version,
                                         MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                                         current_offset);
      current_offset = offset + idx;
      bson_destroy (gle);
      gle = NULL;
   }

   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
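/*
 * _mongoc_write_command_update_legacy --
 *
 *       Variant without a write concern parameter: validates each update
 *       document, then sends one OP_UPDATE message per update with no
 *       getLastError round trip.
 */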
void
_mongoc_write_command_update_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   mongoc_rpc_t rpc;
   uint32_t request_id = 0;
   bson_iter_t subiter, subsubiter;
   bson_t doc;
   bson_t update, selector;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
                 BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   while ((bson = bson_reader_read (reader, &eof))) {
      if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         BSON_ASSERT (bson_init_static (&doc, data, len));

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter)[0] != '$') &&
             !bson_validate (
                &doc, (bson_validate_flags_t) vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            bson_reader_destroy (reader);
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         bson_reader_destroy (reader);
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   bson_reader_destroy (reader);
   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   while ((bson = bson_reader_read (reader, &eof))) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      BSON_ASSERT (bson_iter_init (&subiter, bson));
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.update = data;
            BSON_ASSERT (bson_init_static (&update, data, len));
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.selector = data;
            BSON_ASSERT (bson_init_static (&selector, data, len));
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
         }
      }

      _mongoc_monitor_legacy_write (
         client, command, database, collection, server_stream, request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }

   bson_reader_destroy (reader);
}
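/*
 * _mongoc_write_command_delete_legacy --
 *
 *       Sends one OP_DELETE message per delete document; a non-zero
 *       "limit" maps to MONGOC_DELETE_SINGLE_REMOVE. With an
 *       acknowledged write concern, a getLastError reply is read after
 *       each message and merged into "result".
 */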
void
_mongoc_write_command_delete_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   const uint8_t *data;
   mongoc_rpc_t rpc;
   uint32_t request_id;
   bson_iter_t q_iter;
   uint32_t len;
   int64_t limit = 0;
   bson_t *gle = NULL;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   bool r;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_DELETE_FAILED,
                      "Cannot do an empty delete.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   while ((bson = bson_reader_read (reader, &eof))) {
      /* the document is like { "q": { <selector> }, limit: <0 or 1> } */
      r = (bson_iter_init (&q_iter, bson) && bson_iter_find (&q_iter, "q") &&
           BSON_ITER_HOLDS_DOCUMENT (&q_iter));

      BSON_ASSERT (r);
      bson_iter_document (&q_iter, &len, &data);
      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);

      if (len > max_bson_obj_size) {
         _mongoc_write_command_too_large_error (
            error, 0, len, max_bson_obj_size, NULL);
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_DELETE;
      rpc.delete_.zero = 0;
      rpc.delete_.collection = ns;

      if (bson_iter_find (&q_iter, "limit") &&
          (BSON_ITER_HOLDS_INT (&q_iter))) {
         limit = bson_iter_as_int64 (&q_iter);
      }

      rpc.delete_.flags =
         limit ? MONGOC_DELETE_SINGLE_REMOVE : MONGOC_DELETE_NONE;
      rpc.delete_.selector = data;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_DELETE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      started = bson_get_monotonic_time ();
   }

   bson_reader_destroy (reader);

   EXIT;
}
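/*
 * _mongoc_write_command_insert_legacy --
 *
 *       Variant without a write concern parameter: batches documents
 *       into OP_INSERT messages exactly as above, but sends them with no
 *       getLastError round trip.
 */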
void
_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   mongoc_iovec_t *iov;
   mongoc_rpc_t rpc;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   uint32_t n_docs_in_batch;
   uint32_t request_id = 0;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;
   int data_offset = 0;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) +
                      1 + strlen (collection) + 1);

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);
   while ((bson = bson_reader_read (reader, &eof))) {
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx <= command->n_documents);

      if (bson->len > max_bson_obj_size) {
         /* document is too large */
         _mongoc_write_command_too_large_error (
            error, idx, bson->len, max_bson_obj_size);
         data_offset += bson->len;
         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - bson->len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
         iov[n_docs_in_batch].iov_len = bson->len;
         size += bson->len;
         n_docs_in_batch++;
         data_offset += bson->len;
      }

      idx++;
   }
   bson_reader_destroy (reader);

   if (n_docs_in_batch) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags =
         ((command->flags.ordered) ? MONGOC_INSERT_NONE
                                   : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      _mongoc_monitor_legacy_write (
         client, command, database, collection, server_stream, request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              server_stream,
                                              request_id);
      started = bson_get_monotonic_time ();
   }

cleanup:
   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}