bool
mongoc_uri_set_option_as_int32 (mongoc_uri_t *uri,
                                const char *option,
                                int32_t value)
{
   const bson_t *options;
   bson_iter_t iter;

   BSON_ASSERT (option);

   if (!mongoc_uri_option_is_int32 (option)) {
      return false;
   }

   if ((options = mongoc_uri_get_options (uri)) &&
       bson_iter_init_find_case (&iter, options, option)) {
      if (BSON_ITER_HOLDS_INT32 (&iter)) {
         bson_iter_overwrite_int32 (&iter, value);
         return true;
      } else {
         return false;
      }
   }

   bson_append_int32 (&uri->options, option, -1, value);

   return true;
}
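/*
 * A minimal usage sketch, not part of the driver source: it assumes
 * <mongoc.h> is included and mongoc_init() has already been called.  The URI
 * string and the "connecttimeoutms" option are illustrative; any option that
 * mongoc_uri_option_is_int32() recognizes would behave the same way.
 */
static void
example_set_int32_option (void)
{
   mongoc_uri_t *uri;

   uri = mongoc_uri_new ("mongodb://localhost:27017/?connectTimeoutMS=5000");
   BSON_ASSERT (uri);

   /* "connecttimeoutms" already exists as an int32 in the parsed options, so
    * the stored value is overwritten in place; an int32 option that is not yet
    * present would be appended instead. */
   BSON_ASSERT (mongoc_uri_set_option_as_int32 (uri, "connecttimeoutms", 10000));

   mongoc_uri_destroy (uri);
}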
static void
test_bson_iter_overwrite_int32 (void)
{
   bson_iter_t iter;
   bson_t b;

   bson_init (&b);
   assert (bson_append_int32 (&b, "key", -1, 1234));
   assert (bson_iter_init_find (&iter, &b, "key"));
   assert (BSON_ITER_HOLDS_INT32 (&iter));
   bson_iter_overwrite_int32 (&iter, 4321);
   assert (bson_iter_init_find (&iter, &b, "key"));
   assert (BSON_ITER_HOLDS_INT32 (&iter));
   assert_cmpint (bson_iter_int32 (&iter), ==, 4321);
   bson_destroy (&b);
}
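/*
 * A companion sketch, not from the test suite: since the new value is patched
 * directly into the existing buffer, the document's length is unchanged and no
 * reallocation occurs.  Callers such as mongoc_uri_set_option_as_int32 guard
 * with BSON_ITER_HOLDS_INT32 first, because the overwrite only applies to a
 * field that is already an int32.  Key name and values are illustrative.
 */
static void
example_overwrite_in_place (void)
{
   bson_iter_t iter;
   bson_t b;
   uint32_t len_before;

   bson_init (&b);
   BSON_ASSERT (bson_append_int32 (&b, "retries", -1, 3));
   len_before = b.len;

   if (bson_iter_init_find (&iter, &b, "retries") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      bson_iter_overwrite_int32 (&iter, 5);
   }

   /* overwriting a fixed-size int32 cannot change the document size */
   BSON_ASSERT (b.len == len_before);

   bson_destroy (&b);
}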
static void
_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     const mongoc_write_concern_t *write_concern,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   uint32_t current_offset;
   mongoc_iovec_t *iov;
   const uint8_t *data;
   mongoc_rpc_t rpc;
   bson_iter_t iter;
   uint32_t len;
   bson_t *gle = NULL;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   bool r;
   uint32_t n_docs_in_batch;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   current_offset = offset;

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   r = bson_iter_init (&iter, command->documents);
   if (!r) {
      BSON_ASSERT (false);
      EXIT;
   }

   if (!command->n_documents || !bson_iter_next (&iter)) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 +
                      strlen (collection) + 1);

   do {
      BSON_ASSERT (BSON_ITER_HOLDS_DOCUMENT (&iter));
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx < command->n_documents);

      bson_iter_document (&iter, &len, &data);

      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);

      if (len > max_bson_obj_size) {
         /* document is too large */
         bson_t write_err_doc = BSON_INITIALIZER;

         too_large_error (error, idx, len, max_bson_obj_size, &write_err_doc);

         _mongoc_write_result_merge_legacy (
            result, command, &write_err_doc,
            MONGOC_ERROR_COLLECTION_INSERT_FAILED, offset + idx);

         bson_destroy (&write_err_doc);

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) data;
         iov[n_docs_in_batch].iov_len = len;
         size += len;
         n_docs_in_batch++;
      }

      idx++;
   } while (bson_iter_next (&iter));

   if (n_docs_in_batch) {
      rpc.insert.msg_len = 0;
      rpc.insert.request_id = 0;
      rpc.insert.response_to = 0;
      rpc.insert.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags = (command->flags.ordered
                             ? MONGOC_INSERT_NONE
                             : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      if (!mongoc_cluster_sendv_to_server (&client->cluster, &rpc, 1,
                                           server_stream, write_concern,
                                           error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      if (_mongoc_write_concern_needs_gle (write_concern)) {
         bool err = false;
         bson_iter_t citer;

         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            GOTO (cleanup);
         }

         err = (bson_iter_init_find (&citer, gle, "err") &&
                bson_iter_as_bool (&citer));

         /*
          * Overwrite the "n" field since it will be zero. Otherwise, our
          * merge_legacy code will not know how many we tried in this batch.
          */
         if (!err &&
             bson_iter_init_find (&citer, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&citer) &&
             !bson_iter_int32 (&citer)) {
            bson_iter_overwrite_int32 (&citer, n_docs_in_batch);
         }
      }
   }

cleanup:
   if (gle) {
      _mongoc_write_result_merge_legacy (
         result, command, gle,
         MONGOC_ERROR_COLLECTION_INSERT_FAILED, current_offset);

      current_offset = offset + idx;
      bson_destroy (gle);
      gle = NULL;
   }

   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
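/*
 * A hypothetical helper, not part of the driver, sketching the size accounting
 * the batching loop above performs: an OP_INSERT message is the wire-protocol
 * header (msg_len, request_id, response_to, opcode), 4 bytes of flags, the
 * "db.collection" namespace as a NUL-terminated string, and then the
 * concatenated BSON documents.  The loop's `size` variable tracks this running
 * total so a batch is flushed before max_msg_size would be exceeded.  Assumes
 * the same private headers as the function above (mongoc_rpc_header_t) and
 * <string.h>.
 */
static uint32_t
example_op_insert_msg_size (const char *database,
                            const char *collection,
                            const uint32_t *doc_lens,
                            uint32_t n_docs)
{
   uint32_t size;
   uint32_t i;

   /* header + flags + "db.collection\0" */
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 +
                      strlen (collection) + 1);

   /* each document contributes its full BSON length */
   for (i = 0; i < n_docs; i++) {
      size += doc_lens[i];
   }

   return size;
}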