/* Decode a data packet.
 * return: <0 on error, otherwise the number of parameters pushed onto the stack
 */
int32 bson_codec::decode(
    lua_State *L, const char *buffer, int32 len, const cmd_cfg_t *cfg)
{
    UNUSED(cfg);

    bson_reader_t *reader =
        bson_reader_new_from_data((const uint8_t *)buffer, len);

    const bson_t *doc = bson_reader_read(reader, NULL);
    if (!doc)
    {
        ERROR("invalid bson buffer");
        bson_reader_destroy(reader);
        return -1;
    }

    struct error_collector ec;
    ec.what[0] = 0;

    int32 args = lbs_do_decode_stack(L, doc, &ec);

    bson_reader_destroy(reader);
    if (args < 0)
    {
        ERROR("bson decode:%s", ec.what);
        return -1;
    }

    return args;
}
void
_append_array_from_command (mongoc_write_command_t *command, bson_t *bson)
{
   bson_t ar;
   bson_reader_t *reader;
   char str[16];
   uint32_t i = 0;
   const char *key;
   bool eof;
   const bson_t *current;

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   bson_append_array_begin (bson,
                            gCommandFields[command->type],
                            gCommandFieldLens[command->type],
                            &ar);

   while ((current = bson_reader_read (reader, &eof))) {
      bson_uint32_to_string (i, &key, str, sizeof str);
      BSON_APPEND_DOCUMENT (&ar, key, current);
      i++;
   }

   bson_append_array_end (bson, &ar);

   bson_reader_destroy (reader);
}
static void
test_reader_from_data_document_length_too_small (void)
{
   bson_reader_t *reader;
   uint8_t *buffer;
   bool eof = false;

   buffer = bson_malloc0 (5);
   buffer[0] = 4; /* claimed document length is below the 5-byte minimum */

   reader = bson_reader_new_from_data (buffer, 5);

   assert (!bson_reader_read (reader, &eof));
   assert_cmpint (eof, ==, false);

   bson_free (buffer);
   bson_reader_destroy (reader);
}
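The test above exercises a property worth spelling out: bson_reader_read() returns NULL both at a clean end of the buffer and when it hits a malformed document, and only the reached_eof flag distinguishes the two. A minimal sketch of that idiom, assuming only libbson (validate_bson_stream is a hypothetical helper, not part of the library):

#include <bson.h>

static bool
validate_bson_stream (const uint8_t *data, size_t len)
{
   bson_reader_t *reader = bson_reader_new_from_data (data, len);
   bool eof = false;

   while (bson_reader_read (reader, &eof)) {
      /* consume every complete document */
   }

   bson_reader_destroy (reader);

   /* eof is true only when the buffer ended exactly on a document
    * boundary; false here means the reader stopped on a truncated or
    * malformed document, as in the test above */
   return eof;
}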
Array cbson_loads_from_string (const String& bson)
{
   bson_reader_t *reader;
   const bson_t *obj;
   bool reached_eof;

   Array output = Array();

   reader = bson_reader_new_from_data((uint8_t *)bson.c_str(), bson.size());

   if (!(obj = bson_reader_read(reader, &reached_eof))) {
      mongoThrow<MongoException>(
         "Unexpected end of BSON. Input document is likely corrupted!");
   }

   output = cbson_loads(obj);

   bson_reader_destroy(reader);

   return output;
}
static void
test_reader_from_data_overflow (void)
{
   bson_reader_t *reader;
   uint8_t *buffer;
   const bson_t *b;
   uint32_t i;
   bool eof = false;

   buffer = bson_malloc0 (4096);

   for (i = 0; i < 4095; i += 5) {
      buffer[i] = 5;
   }

   buffer[4095] = 5;

   reader = bson_reader_new_from_data (buffer, 4096);

   for (i = 0; (b = bson_reader_read (reader, &eof)); i++) {
      const uint8_t *buf = bson_get_data (b);

      assert (b->len == 5);
      assert (buf[0] == 5);
      assert (buf[1] == 0);
      assert (buf[2] == 0);
      assert (buf[3] == 0);
      assert (buf[4] == 0);

      eof = false;
   }

   assert (i == (4095 / 5));
   assert_cmpint (eof, ==, false);

   bson_free (buffer);
   bson_reader_destroy (reader);
}
static void
test_reader_from_data (void)
{
   bson_reader_t *reader;
   bson_uint8_t *buffer;
   const bson_t *b;
   bson_uint32_t i;
   bson_bool_t eof = FALSE;

   buffer = bson_malloc0 (4095);

   for (i = 0; i < 4095; i += 5) {
      buffer[i] = 5;
   }

   reader = bson_reader_new_from_data (buffer, 4095);

   for (i = 0; (b = bson_reader_read (reader, &eof)); i++) {
      const bson_uint8_t *buf = bson_get_data (b);

      assert (b->len == 5);
      assert (buf[0] == 5);
      assert (buf[1] == 0);
      assert (buf[2] == 0);
      assert (buf[3] == 0);
      assert (buf[4] == 0);
   }

   assert (i == (4095 / 5));
   assert_cmpint (eof, ==, TRUE);

   bson_free (buffer);
   bson_reader_destroy (reader);
}
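The 5-byte chunks these two tests stitch together are minimal empty BSON documents: a little-endian int32 length of 5 followed by a zero terminator byte. A sketch of building such a stream, assuming libbson only for the allocator (make_empty_doc_stream is a hypothetical helper):

#include <bson.h>

/* each empty document is exactly five bytes: 05 00 00 00 (length) 00 (end) */
static uint8_t *
make_empty_doc_stream (size_t n_docs)
{
   uint8_t *buf = bson_malloc0 (n_docs * 5);
   size_t i;

   for (i = 0; i < n_docs; i++) {
      buf[i * 5] = 5; /* low byte of the length; the rest stays zero */
   }

   return buf; /* caller frees with bson_free () */
}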
static bson_bool_t
_mongoc_cursor_get_more (mongoc_cursor_t *cursor)
{
   bson_uint64_t cursor_id;
   bson_uint32_t request_id;
   mongoc_rpc_t rpc;

   ENTRY;

   BSON_ASSERT (cursor);

   if (!cursor->in_exhaust) {
      if (!_mongoc_client_warm_up (cursor->client, &cursor->error)) {
         cursor->failed = TRUE;
         RETURN (FALSE);
      }

      if (!(cursor_id = cursor->rpc.reply.cursor_id)) {
         bson_set_error (&cursor->error,
                         MONGOC_ERROR_CURSOR,
                         MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                         "No valid cursor was provided.");
         goto failure;
      }

      rpc.get_more.msg_len = 0;
      rpc.get_more.request_id = 0;
      rpc.get_more.response_to = 0;
      rpc.get_more.opcode = MONGOC_OPCODE_GET_MORE;
      rpc.get_more.zero = 0;
      rpc.get_more.collection = cursor->ns;

      if ((cursor->flags & MONGOC_QUERY_TAILABLE_CURSOR)) {
         rpc.get_more.n_return = 0;
      } else {
         rpc.get_more.n_return = _mongoc_n_return (cursor);
      }

      rpc.get_more.cursor_id = cursor_id;

      /*
       * TODO: Stamp protections for disconnections.
       */

      if (!_mongoc_client_sendv (cursor->client, &rpc, 1, cursor->hint,
                                 NULL, cursor->read_prefs, &cursor->error)) {
         cursor->done = TRUE;
         cursor->failed = TRUE;
         RETURN (FALSE);
      }

      request_id = BSON_UINT32_FROM_LE (rpc.header.request_id);
   } else {
      request_id = BSON_UINT32_FROM_LE (cursor->rpc.header.request_id);
   }

   _mongoc_buffer_clear (&cursor->buffer, FALSE);

   if (!_mongoc_client_recv (cursor->client, &cursor->rpc, &cursor->buffer,
                             cursor->hint, &cursor->error)) {
      goto failure;
   }

   if ((cursor->rpc.header.opcode != MONGOC_OPCODE_REPLY) ||
       (cursor->rpc.header.response_to != request_id)) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_PROTOCOL,
                      MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                      "A reply to an invalid request id was received.");
      goto failure;
   }

   if (_mongoc_cursor_unwrap_failure (cursor)) {
      goto failure;
   }

   if (cursor->reader) {
      bson_reader_destroy (cursor->reader);
   }

   cursor->reader = bson_reader_new_from_data (
      cursor->rpc.reply.documents, cursor->rpc.reply.documents_len);

   cursor->end_of_event = FALSE;

   RETURN (TRUE);

failure:
   cursor->done = TRUE;
   cursor->failed = TRUE;

   RETURN (FALSE);
}
static bson_bool_t
_mongoc_cursor_query (mongoc_cursor_t *cursor)
{
   bson_uint32_t hint;
   bson_uint32_t request_id;
   mongoc_rpc_t rpc;

   ENTRY;

   bson_return_val_if_fail (cursor, FALSE);

   if (!_mongoc_client_warm_up (cursor->client, &cursor->error)) {
      cursor->failed = TRUE;
      RETURN (FALSE);
   }

   rpc.query.msg_len = 0;
   rpc.query.request_id = 0;
   rpc.query.response_to = 0;
   rpc.query.opcode = MONGOC_OPCODE_QUERY;
   rpc.query.flags = cursor->flags;
   rpc.query.collection = cursor->ns;
   rpc.query.skip = cursor->skip;

   if ((cursor->flags & MONGOC_QUERY_TAILABLE_CURSOR)) {
      rpc.query.n_return = 0;
   } else {
      rpc.query.n_return = _mongoc_n_return (cursor);
   }

   rpc.query.query = bson_get_data (&cursor->query);
   rpc.query.fields = bson_get_data (&cursor->fields);

   if (!(hint = _mongoc_client_sendv (cursor->client, &rpc, 1, 0, NULL,
                                      cursor->read_prefs, &cursor->error))) {
      goto failure;
   }

   cursor->hint = hint;
   request_id = BSON_UINT32_FROM_LE (rpc.header.request_id);

   _mongoc_buffer_clear (&cursor->buffer, FALSE);

   if (!_mongoc_client_recv (cursor->client, &cursor->rpc, &cursor->buffer,
                             hint, &cursor->error)) {
      goto failure;
   }

   if ((cursor->rpc.header.opcode != MONGOC_OPCODE_REPLY) ||
       (cursor->rpc.header.response_to != request_id)) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_PROTOCOL,
                      MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                      "A reply to an invalid request id was received.");
      goto failure;
   }

   if (_mongoc_cursor_unwrap_failure (cursor)) {
      if ((cursor->error.domain == MONGOC_ERROR_QUERY) &&
          (cursor->error.code == MONGOC_ERROR_QUERY_NOT_TAILABLE)) {
         cursor->failed = TRUE;
      }
      goto failure;
   }

   if (cursor->reader) {
      bson_reader_destroy (cursor->reader);
   }

   cursor->reader = bson_reader_new_from_data (
      cursor->rpc.reply.documents, cursor->rpc.reply.documents_len);

   if (cursor->flags & MONGOC_QUERY_EXHAUST) {
      cursor->in_exhaust = TRUE;
      cursor->client->in_exhaust = TRUE;
   }

   cursor->done = FALSE;
   cursor->end_of_event = FALSE;
   cursor->sent = TRUE;

   RETURN (TRUE);

failure:
   cursor->failed = TRUE;
   cursor->done = TRUE;

   RETURN (FALSE);
}
static void
_mongoc_write_opquery (mongoc_write_command_t *command,
                       mongoc_client_t *client,
                       mongoc_server_stream_t *server_stream,
                       const char *database,
                       const char *collection,
                       const mongoc_write_concern_t *write_concern,
                       uint32_t offset,
                       mongoc_write_result_t *result,
                       bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   const char *key;
   uint32_t len = 0;
   bson_t ar;
   bson_t cmd;
   bson_t reply;
   char str[16];
   bool has_more;
   bool ret = false;
   uint32_t i;
   int32_t max_bson_obj_size;
   int32_t max_write_batch_size;
   uint32_t overhead;
   uint32_t key_len;
   int data_offset = 0;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   bson_init (&cmd);
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_write_batch_size =
      mongoc_server_stream_max_write_batch_size (server_stream);

again:
   has_more = false;
   i = 0;

   _mongoc_write_command_init (&cmd, command, collection);

   /* 1 byte to specify array type, 1 byte for field name's null terminator */
   overhead = cmd.len + 2 + gCommandFieldLens[command->type];

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);

   bson_append_array_begin (&cmd,
                            gCommandFields[command->type],
                            gCommandFieldLens[command->type],
                            &ar);

   while ((bson = bson_reader_read (reader, &eof))) {
      key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str);
      len = bson->len;

      /* 1 byte to specify document type, 1 byte for key's null terminator */
      if (_mongoc_write_command_will_overflow (overhead,
                                               key_len + len + 2 + ar.len,
                                               i,
                                               max_bson_obj_size,
                                               max_write_batch_size)) {
         has_more = true;
         break;
      }

      BSON_APPEND_DOCUMENT (&ar, key, bson);
      data_offset += len;
      i++;
   }

   bson_append_array_end (&cmd, &ar);

   if (!i) {
      _mongoc_write_command_too_large_error (error, i, len, max_bson_obj_size);
      result->failed = true;
      result->must_stop = true;
      ret = false;
      if (bson) {
         data_offset += len;
      }
   } else {
      mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd);
      parts.is_write_command = true;
      parts.assembled.operation_id = command->operation_id;
      if (!mongoc_cmd_parts_set_write_concern (
             &parts, write_concern, server_stream->sd->max_wire_version,
             error)) {
         bson_reader_destroy (reader);
         bson_destroy (&cmd);
         mongoc_cmd_parts_cleanup (&parts);
         EXIT;
      }

      BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts));
      if (!mongoc_cmd_parts_append_opts (
             &parts, &iter, server_stream->sd->max_wire_version, error)) {
         bson_reader_destroy (reader);
         bson_destroy (&cmd);
         mongoc_cmd_parts_cleanup (&parts);
         EXIT;
      }

      ret = mongoc_cmd_parts_assemble (&parts, server_stream, error);
      if (ret) {
         ret = mongoc_cluster_run_command_monitored (
            &client->cluster, &parts.assembled, &reply, error);
      } else {
         /* assembling failed */
         result->must_stop = true;
         bson_init (&reply);
      }

      if (!ret) {
         result->failed = true;
         if (bson_empty (&reply)) {
            /* assembling failed, or a network error running the command */
            result->must_stop = true;
         }
      }

      _mongoc_write_result_merge (result, command, &reply, offset);
      offset += i;
      bson_destroy (&reply);
      mongoc_cmd_parts_cleanup (&parts);
   }

   bson_reader_destroy (reader);

   if (has_more && (ret || !command->flags.ordered) && !result->must_stop) {
      bson_reinit (&cmd);
      GOTO (again);
   }

   bson_destroy (&cmd);

   EXIT;
}
/* {{{ BSON → HHVM */
BsonToVariantConverter::BsonToVariantConverter(
   const unsigned char *data,
   int data_len,
   hippo_bson_conversion_options_t options)
{
   m_reader = bson_reader_new_from_data(data, data_len);
   m_options = options;
}
static bool
_mongoc_cursor_query (mongoc_cursor_t *cursor)
{
   mongoc_rpc_t rpc;
   uint32_t hint;
   uint32_t request_id;

   ENTRY;

   bson_return_val_if_fail (cursor, false);

   if (!_mongoc_client_warm_up (cursor->client, &cursor->error)) {
      cursor->failed = true;
      RETURN (false);
   }

   rpc.query.msg_len = 0;
   rpc.query.request_id = 0;
   rpc.query.response_to = 0;
   rpc.query.opcode = MONGOC_OPCODE_QUERY;
   rpc.query.flags = cursor->flags;
   rpc.query.collection = cursor->ns;
   rpc.query.skip = cursor->skip;

   if ((cursor->flags & MONGOC_QUERY_TAILABLE_CURSOR)) {
      rpc.query.n_return = 0;
   } else {
      rpc.query.n_return = _mongoc_n_return (cursor);
   }

   rpc.query.query = bson_get_data (&cursor->query);

   if (cursor->has_fields) {
      rpc.query.fields = bson_get_data (&cursor->fields);
   } else {
      rpc.query.fields = NULL;
   }

   if (!(hint = _mongoc_client_sendv (cursor->client, &rpc, 1, cursor->hint,
                                      NULL, cursor->read_prefs,
                                      &cursor->error))) {
      GOTO (failure);
   }

   cursor->hint = hint;
   request_id = BSON_UINT32_FROM_LE (rpc.header.request_id);

   _mongoc_buffer_clear (&cursor->buffer, false);

   if (!_mongoc_client_recv (cursor->client, &cursor->rpc, &cursor->buffer,
                             hint, &cursor->error)) {
      GOTO (failure);
   }

   if (cursor->rpc.header.opcode != MONGOC_OPCODE_REPLY) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_PROTOCOL,
                      MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                      "Invalid opcode. Expected %d, got %d.",
                      MONGOC_OPCODE_REPLY,
                      cursor->rpc.header.opcode);
      GOTO (failure);
   }

   if (cursor->rpc.header.response_to != request_id) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_PROTOCOL,
                      MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                      "Invalid response_to. Expected %d, got %d.",
                      request_id,
                      cursor->rpc.header.response_to);
      GOTO (failure);
   }

   if (_mongoc_cursor_unwrap_failure (cursor)) {
      if ((cursor->error.domain == MONGOC_ERROR_QUERY) &&
          (cursor->error.code == MONGOC_ERROR_QUERY_NOT_TAILABLE)) {
         cursor->failed = true;
      }
      GOTO (failure);
   }

   if (cursor->reader) {
      bson_reader_destroy (cursor->reader);
   }

   cursor->reader = bson_reader_new_from_data (
      cursor->rpc.reply.documents, cursor->rpc.reply.documents_len);

   if ((cursor->flags & MONGOC_QUERY_EXHAUST)) {
      cursor->in_exhaust = true;
      cursor->client->in_exhaust = true;
   }

   cursor->done = false;
   cursor->end_of_event = false;
   cursor->sent = true;

   RETURN (true);

failure:
   cursor->failed = true;
   cursor->done = true;

   RETURN (false);
}
void
_mongoc_write_command_update_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     const mongoc_write_concern_t *write_concern,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   mongoc_rpc_t rpc;
   uint32_t request_id = 0;
   bson_iter_t subiter, subsubiter;
   bson_t doc;
   bool has_update, has_selector, is_upsert;
   bson_t update, selector;
   bson_t *gle = NULL;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   int32_t affected = 0;
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
                 BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   while ((bson = bson_reader_read (reader, &eof))) {
      if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         bson_init_static (&doc, data, len);

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter)[0] != '$') &&
             !bson_validate (
                &doc, (bson_validate_flags_t) vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            bson_reader_destroy (reader);
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         bson_reader_destroy (reader);
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   bson_reader_destroy (reader);
   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   while ((bson = bson_reader_read (reader, &eof))) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      has_update = false;
      has_selector = false;
      is_upsert = false;

      bson_iter_init (&subiter, bson);
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size, NULL);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.update = data;
            bson_init_static (&update, data, len);
            has_update = true;
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size, NULL);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.selector = data;
            bson_init_static (&selector, data, len);
            has_selector = true;
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
            is_upsert = true;
         }
      }

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         if (bson_iter_init_find (&subiter, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&subiter)) {
            affected = bson_iter_int32 (&subiter);
         }

         /*
          * CDRIVER-372:
          *
          * Versions of MongoDB before 2.6 don't return the _id for an
          * upsert if _id is not an ObjectId.
          */
         if (is_upsert && affected &&
             !bson_iter_init_find (&subiter, gle, "upserted") &&
             bson_iter_init_find (&subiter, gle, "updatedExisting") &&
             BSON_ITER_HOLDS_BOOL (&subiter) && !bson_iter_bool (&subiter)) {
            if (has_update && bson_iter_init_find (&subiter, &update, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            } else if (has_selector &&
                       bson_iter_init_find (&subiter, &selector, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            }
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_UPDATE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      started = bson_get_monotonic_time ();
   }

   bson_reader_destroy (reader);
}
void
_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     const mongoc_write_concern_t *write_concern,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   uint32_t current_offset;
   mongoc_iovec_t *iov;
   mongoc_rpc_t rpc;
   bson_t *gle = NULL;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   uint32_t n_docs_in_batch;
   uint32_t request_id = 0;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;
   int data_offset = 0;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   started = bson_get_monotonic_time ();
   current_offset = offset;

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) +
                      1 + strlen (collection) + 1);

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);

   while ((bson = bson_reader_read (reader, &eof))) {
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx <= command->n_documents);

      if (bson->len > max_bson_obj_size) {
         /* document is too large */
         bson_t write_err_doc = BSON_INITIALIZER;

         _mongoc_write_command_too_large_error (
            error, idx, bson->len, max_bson_obj_size, &write_err_doc);

         _mongoc_write_result_merge_legacy (
            result,
            command,
            &write_err_doc,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_INSERT_FAILED,
            offset + idx);

         bson_destroy (&write_err_doc);
         data_offset += bson->len;

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - bson->len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
         iov[n_docs_in_batch].iov_len = bson->len;
         size += bson->len;
         n_docs_in_batch++;
         data_offset += bson->len;
      }

      idx++;
   }

   bson_reader_destroy (reader);

   if (n_docs_in_batch) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags =
         ((command->flags.ordered) ? MONGOC_INSERT_NONE
                                   : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         bool err = false;
         bson_iter_t citer;

         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            GOTO (cleanup);
         }

         err = (bson_iter_init_find (&citer, gle, "err") &&
                bson_iter_as_bool (&citer));

         /*
          * Overwrite the "n" field since it will be zero. Otherwise, our
          * merge_legacy code will not know how many we tried in this batch.
          */
         if (!err && bson_iter_init_find (&citer, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&citer) && !bson_iter_int32 (&citer)) {
            bson_iter_overwrite_int32 (&citer, n_docs_in_batch);
         }
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }

cleanup:

   if (gle) {
      _mongoc_write_result_merge_legacy (
         result,
         command,
         gle,
         client->error_api_version,
         MONGOC_ERROR_COLLECTION_INSERT_FAILED,
         current_offset);

      current_offset = offset + idx;
      bson_destroy (gle);
      gle = NULL;
   }

   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
void
_mongoc_write_command_delete_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     const mongoc_write_concern_t *write_concern,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   const uint8_t *data;
   mongoc_rpc_t rpc;
   uint32_t request_id;
   bson_iter_t q_iter;
   uint32_t len;
   int64_t limit = 0;
   bson_t *gle = NULL;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   bool r;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_DELETE_FAILED,
                      "Cannot do an empty delete.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   while ((bson = bson_reader_read (reader, &eof))) {
      /* the document is like { "q": { <selector> }, limit: <0 or 1> } */
      r = (bson_iter_init (&q_iter, bson) && bson_iter_find (&q_iter, "q") &&
           BSON_ITER_HOLDS_DOCUMENT (&q_iter));

      BSON_ASSERT (r);
      bson_iter_document (&q_iter, &len, &data);
      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);

      if (len > max_bson_obj_size) {
         _mongoc_write_command_too_large_error (
            error, 0, len, max_bson_obj_size, NULL);
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_DELETE;
      rpc.delete_.zero = 0;
      rpc.delete_.collection = ns;

      if (bson_iter_find (&q_iter, "limit") &&
          (BSON_ITER_HOLDS_INT (&q_iter))) {
         limit = bson_iter_as_int64 (&q_iter);
      }

      rpc.delete_.flags =
         limit ? MONGOC_DELETE_SINGLE_REMOVE : MONGOC_DELETE_NONE;
      rpc.delete_.selector = data;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_DELETE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      started = bson_get_monotonic_time ();
   }

   bson_reader_destroy (reader);

   EXIT;
}
void
_mongoc_write_command_update_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   mongoc_rpc_t rpc;
   uint32_t request_id = 0;
   bson_iter_t subiter, subsubiter;
   bson_t doc;
   bson_t update, selector;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
                 BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   while ((bson = bson_reader_read (reader, &eof))) {
      if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         BSON_ASSERT (bson_init_static (&doc, data, len));

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter)[0] != '$') &&
             !bson_validate (
                &doc, (bson_validate_flags_t) vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            bson_reader_destroy (reader);
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         bson_reader_destroy (reader);
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   bson_reader_destroy (reader);
   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   while ((bson = bson_reader_read (reader, &eof))) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      BSON_ASSERT (bson_iter_init (&subiter, bson));
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.update = data;
            BSON_ASSERT (bson_init_static (&update, data, len));
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.selector = data;
            BSON_ASSERT (bson_init_static (&selector, data, len));
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
         }
      }

      _mongoc_monitor_legacy_write (
         client, command, database, collection, server_stream, request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }

   bson_reader_destroy (reader);
}
void
_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   mongoc_iovec_t *iov;
   mongoc_rpc_t rpc;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   uint32_t n_docs_in_batch;
   uint32_t request_id = 0;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;
   int data_offset = 0;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) +
                      1 + strlen (collection) + 1);

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);

   while ((bson = bson_reader_read (reader, &eof))) {
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx <= command->n_documents);

      if (bson->len > max_bson_obj_size) {
         /* document is too large */
         _mongoc_write_command_too_large_error (
            error, idx, bson->len, max_bson_obj_size);
         data_offset += bson->len;

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - bson->len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
         iov[n_docs_in_batch].iov_len = bson->len;
         size += bson->len;
         n_docs_in_batch++;
         data_offset += bson->len;
      }

      idx++;
   }

   bson_reader_destroy (reader);

   if (n_docs_in_batch) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags =
         ((command->flags.ordered) ? MONGOC_INSERT_NONE
                                   : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      _mongoc_monitor_legacy_write (
         client, command, database, collection, server_stream, request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }

cleanup:
   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
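Several of the write paths above share the same resume-by-offset batching idiom: fill a batch by reading documents from command->payload, and when the batch is full, destroy the reader and create a new one at payload.data + data_offset so the next round continues with the first document that did not fit. A condensed sketch of just that idiom, assuming libbson (payload_t, send_in_batches, and max_batch_bytes are hypothetical stand-ins; the real functions also handle oversized documents, errors, and monitoring):

#include <bson.h>

typedef struct {
   const uint8_t *data; /* concatenated BSON documents */
   size_t len;
} payload_t;

static void
send_in_batches (const payload_t *payload, size_t max_batch_bytes)
{
   size_t data_offset = 0;
   bool has_more = true;

   while (has_more) {
      bson_reader_t *reader;
      const bson_t *bson;
      bool eof = false;
      size_t batch_size = 0;

      has_more = false;
      reader = bson_reader_new_from_data (payload->data + data_offset,
                                          payload->len - data_offset);

      while ((bson = bson_reader_read (reader, &eof))) {
         /* an oversized first document still gets its own batch, so the
          * loop always makes progress */
         if (batch_size && batch_size + bson->len > max_batch_bytes) {
            has_more = true; /* resume from data_offset next round */
            break;
         }

         /* ... append bson_get_data (bson) to the outgoing batch ... */
         batch_size += bson->len;
         data_offset += bson->len;
      }

      bson_reader_destroy (reader);

      /* ... send the batch of batch_size bytes ... */
   }
}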