/**
 * Get a int32 from the buffer.
 *
 * Reads four little-endian bytes at the current read position, advances
 * the position past them, and returns the value as a Ruby Integer.
 */
VALUE rb_bson_byte_buffer_get_int32(VALUE self)
{
  byte_buffer_t *buffer;
  int32_t value;

  TypedData_Get_Struct(self, byte_buffer_t, &rb_byte_buffer_data_type, buffer);

  ENSURE_BSON_READ(buffer, 4);
  memcpy(&value, READ_PTR(buffer), 4);
  buffer->read_position += 4;

  return INT2NUM(BSON_UINT32_FROM_LE(value));
}
/*
 * Read the next BSON document from a handle-backed reader.
 *
 * Refills (and grows, if necessary) the internal buffer until a complete
 * document is available, then returns a bson_t aliasing the buffered bytes.
 * Returns NULL at end-of-stream or on a corrupt length prefix; when
 * reached_eof is non-NULL it is set to true only on a clean EOF.
 */
static const bson_t *
_bson_reader_handle_read (bson_reader_handle_t *reader, /* IN */
                          bool *reached_eof)            /* IN */
{
   int32_t blen;

   bson_return_val_if_fail (reader, NULL);

   if (reached_eof) {
      *reached_eof = false;
   }

   while (!reader->done) {
      /* Need at least the 4-byte length prefix before sizing the document. */
      if ((reader->end - reader->offset) < 4) {
         _bson_reader_handle_fill_buffer (reader);
         continue;
      }

      memcpy (&blen, &reader->data[reader->offset], sizeof blen);
      blen = BSON_UINT32_FROM_LE (blen);

      /* A valid BSON document is at least 5 bytes (length + terminator);
       * this also rejects negative lengths from corrupt input. */
      if (blen < 5) {
         return NULL;
      }

      /* Document extends past the buffered bytes: grow and/or refill. */
      if (blen > (int32_t)(reader->end - reader->offset)) {
         if (blen > (int32_t)reader->len) {
            _bson_reader_handle_grow_buffer (reader);
         }

         _bson_reader_handle_fill_buffer (reader);
         continue;
      }

      if (!bson_init_static (&reader->inline_bson,
                             &reader->data[reader->offset],
                             (uint32_t)blen)) {
         return NULL;
      }

      reader->offset += blen;

      return &reader->inline_bson;
   }

   if (reached_eof) {
      /* Clean EOF only when the stream ended without a read failure. */
      *reached_eof = reader->done && !reader->failed;
   }

   return NULL;
}
/*
 * Async phase handler: accumulate the 4-byte message-length header of a
 * server reply.  Returns IN_PROGRESS until the full header has arrived,
 * then validates the declared length and hands off to the RPC-body phase.
 */
mongoc_async_cmd_result_t
_mongoc_async_cmd_phase_recv_len (mongoc_async_cmd_t *acmd)
{
   ssize_t bytes = _mongoc_buffer_try_append_from_stream (
      &acmd->buffer, acmd->stream, acmd->bytes_to_read, 0);
   uint32_t msg_len;

   /* A retryable short read (e.g. EAGAIN) is not an error yet. */
   if (bytes <= 0 && mongoc_stream_should_retry (acmd->stream)) {
      return MONGOC_ASYNC_CMD_IN_PROGRESS;
   }

   if (bytes < 0) {
      bson_set_error (&acmd->error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "Failed to receive length header from server.");
      return MONGOC_ASYNC_CMD_ERROR;
   }

   if (bytes == 0) {
      bson_set_error (&acmd->error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "Server closed connection.");
      return MONGOC_ASYNC_CMD_ERROR;
   }

   acmd->bytes_to_read = (size_t) (acmd->bytes_to_read - bytes);

   if (!acmd->bytes_to_read) {
      /* Whole length prefix is buffered: decode and sanity-check it. */
      memcpy (&msg_len, acmd->buffer.data, 4);
      msg_len = BSON_UINT32_FROM_LE (msg_len);

      /* 16 bytes is the minimum wire-message size (the header itself);
       * also reject lengths over the max or smaller than what we hold. */
      if (msg_len < 16 || msg_len > MONGOC_DEFAULT_MAX_MSG_SIZE ||
          msg_len < acmd->buffer.len) {
         bson_set_error (&acmd->error,
                         MONGOC_ERROR_PROTOCOL,
                         MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                         "Invalid reply from server.");
         return MONGOC_ASYNC_CMD_ERROR;
      }

      acmd->bytes_to_read = msg_len - acmd->buffer.len;
      acmd->state = MONGOC_ASYNC_CMD_RECV_RPC;

      return _mongoc_async_cmd_phase_recv_rpc (acmd);
   }

   return MONGOC_ASYNC_CMD_IN_PROGRESS;
}
/**
 * Performs an aggregation operation on a MongoDB collection.
 *
 * @param collection The MongoDB collection to query against.
 * @param pipeline A pointer to a BSON buffer representing the pipeline.
 * @param coldata The column data to store the results in.
 *
 * @return If successful, a Monary cursor that should be freed with
 * monary_close_query() when no longer in use. If unsuccessful, or if an invalid
 * pipeline was passed in, NULL is returned.
 */
monary_cursor* monary_init_aggregate(mongoc_collection_t* collection,
                                     const uint8_t* pipeline,
                                     monary_column_data* coldata)
{
    bson_t pl_bson;
    int32_t pl_size;
    mongoc_cursor_t* mcursor;
    monary_cursor* cursor;

    // Sanity checks
    if (!collection) {
        DEBUG("%s", "Invalid collection");
        return NULL;
    }
    else if (!pipeline) {
        DEBUG("%s", "Invalid pipeline");
        return NULL;
    }

    // Build BSON pipeline
    memcpy(&pl_size, pipeline, sizeof(int32_t));
    pl_size = (int32_t) BSON_UINT32_FROM_LE(pl_size);

    // Fix: validate the embedded length before trusting it.  A corrupt
    // prefix below the 5-byte BSON minimum (including negative values) was
    // previously handed straight to bson_init_static.
    // NOTE(review): the caller's buffer size is not visible here, so an
    // over-long prefix cannot be detected at this level — confirm callers
    // pass a complete document.
    if (pl_size < 5) {
        DEBUG("%s", "Invalid pipeline length");
        return NULL;
    }

    if (!bson_init_static(&pl_bson, pipeline, pl_size)) {
        DEBUG("%s", "Failed to initialize raw BSON pipeline");
        return NULL;
    }

    // Get an aggregation cursor
    mcursor = mongoc_collection_aggregate(collection,
                                          MONGOC_QUERY_NONE,
                                          &pl_bson,
                                          NULL,
                                          NULL);

    // Clean up
    bson_destroy(&pl_bson);
    if (!mcursor) {
        DEBUG("%s", "An error occurred with the aggregation");
        return NULL;
    }

    // Fix: check the allocation instead of dereferencing a possible NULL.
    cursor = (monary_cursor*) malloc(sizeof(monary_cursor));
    if (!cursor) {
        DEBUG("%s", "Failed to allocate monary cursor");
        mongoc_cursor_destroy(mcursor);
        return NULL;
    }
    cursor->mcursor = mcursor;
    cursor->coldata = coldata;
    return cursor;
}
/**
 * Get a string from the buffer.
 *
 * Reads a 4-byte little-endian length prefix (which counts the trailing
 * NUL), then returns the string bytes (without the NUL) as a UTF-8 Ruby
 * String, advancing the read position past the prefix and the data.
 */
VALUE rb_bson_byte_buffer_get_string(VALUE self)
{
  byte_buffer_t *b;
  int32_t length;
  int32_t length_le;
  VALUE string;

  TypedData_Get_Struct(self, byte_buffer_t, &rb_byte_buffer_data_type, b);

  ENSURE_BSON_READ(b, 4);
  memcpy(&length, READ_PTR(b), 4);
  length_le = BSON_UINT32_FROM_LE(length);

  /* Fix: a declared length < 1 (no room for the NUL terminator, or a
   * negative value from a corrupt buffer) previously made length_le - 1
   * below negative, over-reading the buffer in rb_enc_str_new. */
  if (length_le < 1) {
    rb_raise(rb_eRangeError, "Invalid string length in byte buffer: %d", length_le);
  }

  b->read_position += 4;

  ENSURE_BSON_READ(b, length_le);
  string = rb_enc_str_new(READ_PTR(b), length_le - 1, rb_utf8_encoding());
  b->read_position += length_le;

  return string;
}
/*
 * Parse a raw wire-protocol message into *rpc.  The first 16 bytes are
 * the standard header; the remainder is scattered according to the
 * header's opcode.  Returns false for short buffers, malformed bodies,
 * or unrecognized opcodes.
 */
bool
_mongoc_rpc_scatter (mongoc_rpc_t *rpc, const uint8_t *buf, size_t buflen)
{
   mongoc_opcode_t op;

   bson_return_val_if_fail (rpc, false);
   bson_return_val_if_fail (buf, false);
   bson_return_val_if_fail (buflen, false);

   memset (rpc, 0, sizeof *rpc);

   if (BSON_UNLIKELY (buflen < 16)) {
      return false;
   }

   if (!_mongoc_rpc_scatter_header (&rpc->header, buf, 16)) {
      return false;
   }

   /* The opcode is stored little-endian on the wire. */
   op = BSON_UINT32_FROM_LE (rpc->header.opcode);

   switch (op) {
   case MONGOC_OPCODE_REPLY:
      return _mongoc_rpc_scatter_reply (&rpc->reply, buf, buflen);
   case MONGOC_OPCODE_MSG:
      return _mongoc_rpc_scatter_msg (&rpc->msg, buf, buflen);
   case MONGOC_OPCODE_UPDATE:
      return _mongoc_rpc_scatter_update (&rpc->update, buf, buflen);
   case MONGOC_OPCODE_INSERT:
      return _mongoc_rpc_scatter_insert (&rpc->insert, buf, buflen);
   case MONGOC_OPCODE_QUERY:
      return _mongoc_rpc_scatter_query (&rpc->query, buf, buflen);
   case MONGOC_OPCODE_GET_MORE:
      return _mongoc_rpc_scatter_get_more (&rpc->get_more, buf, buflen);
   case MONGOC_OPCODE_DELETE:
      return _mongoc_rpc_scatter_delete (&rpc->delete, buf, buflen);
   case MONGOC_OPCODE_KILL_CURSORS:
      return _mongoc_rpc_scatter_kill_cursors (&rpc->kill_cursors, buf, buflen);
   default:
      MONGOC_WARNING ("Unknown rpc type: 0x%08x", op);
      return false;
   }
}
/*
 * Initialize *bson as a static view of the first document in an OP_REPLY's
 * document stream.  Returns false when there is no document, the declared
 * length exceeds the available bytes, or the bytes are not valid BSON.
 */
bool
_mongoc_rpc_reply_get_first (mongoc_rpc_reply_t *reply, bson_t *bson)
{
   int32_t doc_len;

   if (!reply->documents || reply->documents_len < 4) {
      return false;
   }

   /* The document's own little-endian length prefix bounds the view. */
   memcpy (&doc_len, reply->documents, 4);
   doc_len = BSON_UINT32_FROM_LE (doc_len);

   if (reply->documents_len < doc_len) {
      return false;
   }

   return bson_init_static (bson, reply->documents, doc_len);
}
/*
 * Legacy in-memory reader: return the next document, or NULL at end of
 * data / on invalid BSON.  *reached_eof (if provided) is set to TRUE only
 * when the offset lands exactly on the end of the data.
 *
 * NOTE(review): "blen + reader->offset" below can wrap for a corrupt,
 * very large declared length — presumably inputs are trusted at this
 * layer; verify against callers.
 */
static const bson_t *
_bson_reader_data_read (bson_reader_data_t *reader,
                        bson_bool_t *reached_eof)
{
   bson_uint32_t blen;

   bson_return_val_if_fail (reader, NULL);

   if (reached_eof) {
      *reached_eof = FALSE;
   }

   /* Need the 4-byte length prefix plus at least one more byte. */
   if ((reader->offset + 4) < reader->length) {
      memcpy (&blen, &reader->data[reader->offset], sizeof blen);
      blen = BSON_UINT32_FROM_LE (blen);

      /* The declared document must fit in the remaining data. */
      if ((blen + reader->offset) <= reader->length) {
         if (!bson_init_static (&reader->inline_bson,
                                &reader->data[reader->offset],
                                blen)) {
            if (reached_eof) {
               *reached_eof = FALSE;
            }

            return NULL;
         }

         reader->offset += blen;

         if (reached_eof) {
            *reached_eof = (reader->offset == reader->length);
         }

         return &reader->inline_bson;
      }
   }

   if (reached_eof) {
      *reached_eof = (reader->offset == reader->length);
   }

   return NULL;
}
/*
 * For command monitoring: reconstruct the "documents"/"updates"/"deletes"
 * array from the raw OP_MSG payload-type-1 section and append it to the
 * started-event's command document.  No-op when there is no payload.
 */
static void
append_documents_from_cmd (const mongoc_cmd_t *cmd,
                           mongoc_apm_command_started_t *event)
{
   int32_t doc_len;
   bson_t doc;
   const uint8_t *pos;
   const char *field_name;
   bson_t bson;
   char str[16];
   const char *key;
   uint32_t i;

   if (!cmd->payload || !cmd->payload_size) {
      return;
   }

   /* Copy-on-write: don't mutate a command document we don't own. */
   if (!event->command_owned) {
      event->command = bson_copy (event->command);
      event->command_owned = true;
   }

   /* make array from outgoing OP_MSG payload type 1 on an "insert",
    * "update", or "delete" command. */
   field_name = _mongoc_get_documents_field_name (cmd->command_name);
   BSON_ASSERT (field_name);
   BSON_ASSERT (BSON_APPEND_ARRAY_BEGIN (event->command, field_name, &bson));

   pos = cmd->payload;
   i = 0;
   while (pos < cmd->payload + cmd->payload_size) {
      /* Each document is prefixed with its own little-endian length. */
      memcpy (&doc_len, pos, sizeof (doc_len));
      doc_len = BSON_UINT32_FROM_LE (doc_len);
      BSON_ASSERT (bson_init_static (&doc, pos, (size_t) doc_len));

      /* Array keys are the decimal indices "0", "1", ... */
      bson_uint32_to_string (i, &key, str, sizeof (str));
      BSON_APPEND_DOCUMENT (&bson, key, &doc);

      pos += doc_len;
      i++;
   }

   bson_append_array_end (event->command, &bson);
}
/*
 * Read the next document from a file-descriptor-backed reader, refilling
 * (and growing) the internal buffer as needed.  Returns NULL at EOF or on
 * invalid BSON; *reached_eof (if provided) reports a clean end-of-stream.
 */
static const bson_t *
_bson_reader_fd_read (bson_reader_fd_t *reader,
                      bson_bool_t *reached_eof)
{
   bson_uint32_t blen;

   bson_return_val_if_fail (reader, NULL);

   /* Fix: initialize the out-parameter up front.  Previously it was only
    * written when the loop fell through at EOF, so the early returns
    * inside the loop (success, or bson_init_static failure) left the
    * caller's flag uninitialized. */
   if (reached_eof) {
      *reached_eof = FALSE;
   }

   while (!reader->done) {
      /* Need at least the 4-byte length prefix. */
      if ((reader->end - reader->offset) < 4) {
         _bson_reader_fd_fill_buffer (reader);
         continue;
      }

      memcpy (&blen, &reader->data[reader->offset], sizeof blen);
      blen = BSON_UINT32_FROM_LE (blen);

      /* Document extends past the buffered bytes: grow and/or refill. */
      if (blen > (reader->end - reader->offset)) {
         if (blen > reader->len) {
            _bson_reader_fd_grow_buffer (reader);
         }

         _bson_reader_fd_fill_buffer (reader);
         continue;
      }

      if (!bson_init_static (&reader->inline_bson,
                             &reader->data[reader->offset],
                             blen)) {
         return NULL;
      }

      reader->offset += blen;

      return &reader->inline_bson;
   }

   if (reached_eof) {
      /* Clean EOF only when the stream ended without a read failure. */
      *reached_eof = reader->done && !reader->failed;
   }

   return NULL;
}
/*
 * Convert an RPC's fields from wire (little-endian) order to host order.
 * Compiles to a no-op on little-endian hosts.
 */
void
_mongoc_rpc_swab_from_le (mongoc_rpc_t *rpc)
{
#if BSON_BYTE_ORDER != BSON_LITTLE_ENDIAN
   mongoc_opcode_t opcode;

   bson_return_if_fail(rpc);

   opcode = BSON_UINT32_FROM_LE(rpc->header.opcode);

   switch (opcode) {
   case MONGOC_OPCODE_REPLY:
      _mongoc_rpc_swab_from_le_reply(&rpc->reply);
      break;
   case MONGOC_OPCODE_MSG:
      _mongoc_rpc_swab_from_le_msg(&rpc->msg);
      break;
   case MONGOC_OPCODE_UPDATE:
      _mongoc_rpc_swab_from_le_update(&rpc->update);
      break;
   case MONGOC_OPCODE_INSERT:
      _mongoc_rpc_swab_from_le_insert(&rpc->insert);
      break;
   case MONGOC_OPCODE_QUERY:
      _mongoc_rpc_swab_from_le_query(&rpc->query);
      break;
   case MONGOC_OPCODE_GET_MORE:
      _mongoc_rpc_swab_from_le_get_more(&rpc->get_more);
      break;
   case MONGOC_OPCODE_DELETE:
      _mongoc_rpc_swab_from_le_delete(&rpc->delete);
      break;
   case MONGOC_OPCODE_KILL_CURSORS:
      _mongoc_rpc_swab_from_le_kill_cursors(&rpc->kill_cursors);
      break;
   default:
      /* Fix: report the swabbed (host-order) opcode, consistent with
       * _mongoc_rpc_scatter; rpc->header.opcode is still raw LE here. */
      MONGOC_WARNING("Unknown rpc type: 0x%08x", opcode);
      break;
   }
#endif
}
/*
 * Initialize *bson as a static view of the first document in an OP_REPLY's
 * document stream.  Returns FALSE when there is no document, the declared
 * length exceeds the available bytes, or the bytes are not valid BSON.
 */
bson_bool_t
_mongoc_rpc_reply_get_first (mongoc_rpc_reply_t *reply, bson_t *bson)
{
   bson_int32_t doc_len;

   bson_return_val_if_fail(reply, FALSE);
   bson_return_val_if_fail(bson, FALSE);

   if (!reply->documents || reply->documents_len < 4) {
      return FALSE;
   }

   /* The document's own little-endian length prefix bounds the view. */
   memcpy(&doc_len, reply->documents, 4);
   doc_len = BSON_UINT32_FROM_LE(doc_len);

   if (reply->documents_len < doc_len) {
      return FALSE;
   }

   return bson_init_static(bson, reply->documents, doc_len);
}
/*
 * Read the next document from an in-memory (data-backed) reader.
 * Returns NULL at end of data or on a corrupt length prefix; sets
 * *reached_eof to true only when the offset landed exactly on the end.
 */
static const bson_t *
_bson_reader_data_read (bson_reader_data_t *reader, /* IN */
                        bool *reached_eof)          /* IN */
{
   int32_t blen;

   bson_return_val_if_fail (reader, NULL);

   if (reached_eof) {
      *reached_eof = false;
   }

   /* Need the 4-byte length prefix plus at least one more byte. */
   if ((reader->offset + 4) < reader->length) {
      memcpy (&blen, &reader->data[reader->offset], sizeof blen);
      blen = BSON_UINT32_FROM_LE (blen);

      /* Minimum valid BSON document is 5 bytes; also rejects negatives. */
      if (blen < 5) {
         return NULL;
      }

      /* Declared length must fit in the remaining data. */
      if (blen > (int32_t)(reader->length - reader->offset)) {
         return NULL;
      }

      if (!bson_init_static (&reader->inline_bson,
                             &reader->data[reader->offset],
                             (uint32_t)blen)) {
         return NULL;
      }

      reader->offset += blen;

      return &reader->inline_bson;
   }

   if (reached_eof) {
      *reached_eof = (reader->offset == reader->length);
   }

   return NULL;
}
/*
 * Mock server: intercept OP_QUERY commands and answer "ismaster" and
 * "ping" directly.  Returns true when the command was handled here;
 * anything else falls through to the server's user handler.
 */
static bool
handle_command (mock_server_t *server,
                mongoc_stream_t *client,
                mongoc_rpc_t *rpc)
{
   int32_t len;
   bool ret = false;
   bson_iter_t iter;
   const char *key;
   bson_t doc;

   BSON_ASSERT (rpc);

   if (rpc->header.opcode != MONGOC_OPCODE_QUERY) {
      return false;
   }

   /* Reconstruct the query document from its length-prefixed raw bytes. */
   memcpy (&len, rpc->query.query, 4);
   len = BSON_UINT32_FROM_LE (len);
   if (!bson_init_static (&doc, rpc->query.query, len)) {
      return false;
   }

   if (!bson_iter_init (&iter, &doc) || !bson_iter_next (&iter)) {
      return false;
   }

   /* By convention the command name is the first key in the document. */
   key = bson_iter_key (&iter);

   if (!strcasecmp (key, "ismaster")) {
      ret = handle_ismaster (server, client, rpc, &doc);
   } else if (!strcasecmp (key, "ping")) {
      ret = handle_ping (server, client, rpc, &doc);
   }

   bson_destroy (&doc);

   return ret;
}
/*
 * Mock-server handler used by the read-preference tests: asserts that an
 * incoming OP_QUERY carries a $query field plus a $readPreference document
 * of the shape {mode: "secondaryPreferred", tags: [{dc: "ny"}, {}]}, then
 * sends an empty reply.  Sets *user_data (a bool) to true on success; any
 * structural mismatch trips an assert.
 */
static void
read_prefs_handler (mock_server_t *server,
                    mongoc_stream_t *stream,
                    mongoc_rpc_t *rpc,
                    void *user_data)
{
   bool *success = user_data;
   int32_t len;
   bson_iter_t iter;
   bson_iter_t child;
   bson_iter_t child2;
   bson_iter_t child3;
   bson_t b;
   bson_t reply = BSON_INITIALIZER;
   int r;

   if (rpc->header.opcode == MONGOC_OPCODE_QUERY) {
      /* Rebuild the query document from its length-prefixed raw bytes. */
      memcpy (&len, rpc->query.query, 4);
      len = BSON_UINT32_FROM_LE (len);

      r = bson_init_static (&b, rpc->query.query, len);
      assert (r);

      r = bson_iter_init_find (&iter, &b, "$query");
      assert (r);
      assert (BSON_ITER_HOLDS_DOCUMENT (&iter));

      r = bson_iter_init_find (&iter, &b, "$readPreference");
      assert (r);
      assert (BSON_ITER_HOLDS_DOCUMENT (&iter));

      /* First field of $readPreference: mode == "secondaryPreferred". */
      r = bson_iter_recurse (&iter, &child);
      assert (r);
      r = bson_iter_next (&child);
      assert (r);
      assert (BSON_ITER_HOLDS_UTF8 (&child));
      assert (!strcmp ("mode", bson_iter_key (&child)));
      assert (!strcmp ("secondaryPreferred", bson_iter_utf8 (&child, NULL)));

      /* Second field: the tags array. */
      r = bson_iter_next (&child);
      assert (r);
      assert (BSON_ITER_HOLDS_ARRAY (&child));

      r = bson_iter_recurse (&child, &child2);
      assert (r);

      /* First tag set: exactly {dc: "ny"}. */
      r = bson_iter_next (&child2);
      assert (r);
      assert (BSON_ITER_HOLDS_DOCUMENT (&child2));
      r = bson_iter_recurse (&child2, &child3);
      assert (r);
      r = bson_iter_next (&child3);
      assert (r);
      assert (BSON_ITER_HOLDS_UTF8 (&child3));
      assert (!strcmp ("dc", bson_iter_key (&child3)));
      assert (!strcmp ("ny", bson_iter_utf8 (&child3, NULL)));
      r = bson_iter_next (&child3);
      assert (!r);

      /* Second tag set: the empty document {}. */
      r = bson_iter_next (&child2);
      assert (r);
      r = bson_iter_recurse (&child2, &child3);
      assert (r);
      r = bson_iter_next (&child3);
      assert (!r);

      mock_server_reply_simple (server, stream, rpc, MONGOC_REPLY_NONE,
                                &reply);

      *success = true;
   }
}
static void * worker_thread (void *data) { worker_closure_t *closure = (worker_closure_t *) data; mock_server_t *server = closure->server; mongoc_stream_t *client_stream = closure->client_stream; mongoc_buffer_t buffer; mongoc_rpc_t *rpc = NULL; bool handled; bson_error_t error; int32_t msg_len; sync_queue_t *requests; sync_queue_t *replies; request_t *request; mongoc_array_t autoresponders; ssize_t i; autoresponder_handle_t handle; reply_t *reply; #ifdef MONGOC_ENABLE_SSL bool ssl; #endif ENTRY; /* queue of client replies sent over this worker's connection */ replies = q_new (); #ifdef MONGOC_ENABLE_SSL mongoc_mutex_lock (&server->mutex); ssl = server->ssl; mongoc_mutex_unlock (&server->mutex); if (ssl) { if (!mongoc_stream_tls_handshake_block (client_stream, "localhost", TIMEOUT, &error)) { mongoc_stream_close (client_stream); mongoc_stream_destroy (client_stream); RETURN (NULL); } } #endif _mongoc_buffer_init (&buffer, NULL, 0, NULL, NULL); _mongoc_array_init (&autoresponders, sizeof (autoresponder_handle_t)); again: /* loop, checking for requests to receive or replies to send */ bson_free (rpc); rpc = NULL; if (_mongoc_buffer_fill (&buffer, client_stream, 4, 10, &error) > 0) { assert (buffer.len >= 4); memcpy (&msg_len, buffer.data + buffer.off, 4); msg_len = BSON_UINT32_FROM_LE (msg_len); if (msg_len < 16) { MONGOC_WARNING ("No data"); GOTO (failure); } if (_mongoc_buffer_fill (&buffer, client_stream, (size_t) msg_len, -1, &error) == -1) { MONGOC_WARNING ("%s():%d: %s", BSON_FUNC, __LINE__, error.message); GOTO (failure); } assert (buffer.len >= (unsigned) msg_len); /* copies message from buffer */ request = request_new (&buffer, msg_len, server, client_stream, closure->port, replies); memmove (buffer.data, buffer.data + buffer.off + msg_len, buffer.len - msg_len); buffer.off = 0; buffer.len -= msg_len; mongoc_mutex_lock (&server->mutex); _mongoc_array_copy (&autoresponders, &server->autoresponders); mongoc_mutex_unlock (&server->mutex); 
test_suite_mock_server_log ("%5.2f %hu -> %hu %s", mock_server_get_uptime_sec (server), closure->port, server->port, request->as_str); /* run responders most-recently-added-first */ handled = false; for (i = server->autoresponders.len - 1; i >= 0; i--) { handle = _mongoc_array_index (&server->autoresponders, autoresponder_handle_t, i); if (handle.responder (request, handle.data)) { /* responder destroyed request and enqueued a reply in "replies" */ handled = true; request = NULL; break; } } if (!handled) { /* pass to the main thread via the queue */ requests = mock_server_get_queue (server); q_put (requests, (void *) request); } } if (_mock_server_stopping (server)) { GOTO (failure); } reply = q_get (replies, 10); if (reply) { _mock_server_reply_with_stream (server, reply, client_stream); _reply_destroy (reply); } if (_mock_server_stopping (server)) { GOTO (failure); } GOTO (again); failure: _mongoc_array_destroy (&autoresponders); _mongoc_buffer_destroy (&buffer); mongoc_stream_close (client_stream); mongoc_stream_destroy (client_stream); bson_free (rpc); bson_free (closure); _mongoc_buffer_destroy (&buffer); while ((reply = q_get_nowait (replies))) { _reply_destroy (reply); } q_destroy (replies); RETURN (NULL); }
/* TODO: factor */ static void * worker_thread (void *data) { worker_closure_t *closure = (worker_closure_t *) data; mock_server_t *server = closure->server; mongoc_stream_t *client_stream = closure->client_stream; mongoc_buffer_t buffer; mongoc_rpc_t *rpc = NULL; bool handled; bson_error_t error; int32_t msg_len; bool stopped; sync_queue_t *q; request_t *request; mongoc_array_t autoresponders; ssize_t i; autoresponder_handle_t handle; #ifdef MONGOC_ENABLE_SSL bool ssl; #endif ENTRY; BSON_ASSERT(closure); #ifdef MONGOC_ENABLE_SSL mongoc_mutex_lock (&server->mutex); ssl = server->ssl; mongoc_mutex_unlock (&server->mutex); if (ssl) { if (!mongoc_stream_tls_handshake_block (client_stream, "localhost", TIMEOUT, &error)) { MONGOC_ERROR("Blocking TLS handshake failed"); mongoc_stream_close (client_stream); mongoc_stream_destroy (client_stream); RETURN (NULL); } } #endif _mongoc_buffer_init (&buffer, NULL, 0, NULL, NULL); _mongoc_array_init (&autoresponders, sizeof (autoresponder_handle_t)); again: bson_free (rpc); rpc = NULL; handled = false; mongoc_mutex_lock (&server->mutex); stopped = server->stopped; mongoc_mutex_unlock (&server->mutex); if (stopped) { GOTO(failure); } if (_mongoc_buffer_fill (&buffer, client_stream, 4, TIMEOUT, &error) == -1) { GOTO (again); } assert (buffer.len >= 4); memcpy (&msg_len, buffer.data + buffer.off, 4); msg_len = BSON_UINT32_FROM_LE (msg_len); if (msg_len < 16) { MONGOC_WARNING ("No data"); GOTO (failure); } if (_mongoc_buffer_fill (&buffer, client_stream, (size_t) msg_len, -1, &error) == -1) { MONGOC_WARNING ("%s():%d: %s", BSON_FUNC, __LINE__, error.message); GOTO (failure); } assert (buffer.len >= (unsigned) msg_len); /* copies message from buffer */ request = request_new (&buffer, msg_len, server, client_stream, closure->port); mongoc_mutex_lock (&server->mutex); _mongoc_array_copy (&autoresponders, &server->autoresponders); mongoc_mutex_unlock (&server->mutex); if (mock_server_get_verbose (server)) { printf ("%5.2f %hu -> %hu 
%s\n", mock_server_get_uptime_sec (server), closure->port, server->port, request->as_str); fflush (stdout); } /* run responders most-recently-added-first */ for (i = server->autoresponders.len - 1; i >= 0; i--) { handle = _mongoc_array_index (&server->autoresponders, autoresponder_handle_t, i); if (handle.responder (request, handle.data)) { handled = true; /* responder should destroy the request */ request = NULL; break; } } if (!handled) { q = mock_server_get_queue (server); q_put (q, (void *) request); request = NULL; } memmove (buffer.data, buffer.data + buffer.off + msg_len, buffer.len - msg_len); buffer.off = 0; buffer.len -= msg_len; GOTO (again); failure: _mongoc_array_destroy (&autoresponders); _mongoc_buffer_destroy (&buffer); mongoc_stream_close (client_stream); mongoc_stream_destroy (client_stream); bson_free (rpc); bson_free (closure); _mongoc_buffer_destroy (&buffer); RETURN (NULL); }
/*
 * Issue OP_GET_MORE for this cursor (or, in exhaust mode, just receive the
 * next already-in-flight reply) and point cursor->reader at the returned
 * documents.  Returns TRUE on success; on failure marks the cursor
 * done/failed and returns FALSE.
 */
static bson_bool_t
_mongoc_cursor_get_more (mongoc_cursor_t *cursor)
{
   bson_uint64_t cursor_id;
   bson_uint32_t request_id;
   mongoc_rpc_t rpc;

   ENTRY;

   BSON_ASSERT(cursor);

   if (! cursor->in_exhaust) {
      if (!_mongoc_client_warm_up (cursor->client, &cursor->error)) {
         cursor->failed = TRUE;
         RETURN (FALSE);
      }

      /* The previous reply must have carried a live server-side cursor id. */
      if (!(cursor_id = cursor->rpc.reply.cursor_id)) {
         bson_set_error(&cursor->error,
                        MONGOC_ERROR_CURSOR,
                        MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                        "No valid cursor was provided.");
         goto failure;
      }

      rpc.get_more.msg_len = 0;
      rpc.get_more.request_id = 0;
      rpc.get_more.response_to = 0;
      rpc.get_more.opcode = MONGOC_OPCODE_GET_MORE;
      rpc.get_more.zero = 0;
      rpc.get_more.collection = cursor->ns;

      /* Tailable cursors let the server choose the batch size. */
      if ((cursor->flags & MONGOC_QUERY_TAILABLE_CURSOR)) {
         rpc.get_more.n_return = 0;
      } else {
         rpc.get_more.n_return = _mongoc_n_return(cursor);
      }

      rpc.get_more.cursor_id = cursor_id;

      /*
       * TODO: Stamp protections for disconnections.
       */

      if (!_mongoc_client_sendv(cursor->client, &rpc, 1, cursor->hint,
                                NULL, cursor->read_prefs, &cursor->error)) {
         cursor->done = TRUE;
         cursor->failed = TRUE;
         RETURN(FALSE);
      }

      request_id = BSON_UINT32_FROM_LE(rpc.header.request_id);
   } else {
      /* Exhaust mode: replies continue answering the original request. */
      request_id = BSON_UINT32_FROM_LE(cursor->rpc.header.request_id);
   }

   _mongoc_buffer_clear(&cursor->buffer, FALSE);

   if (!_mongoc_client_recv(cursor->client,
                            &cursor->rpc,
                            &cursor->buffer,
                            cursor->hint,
                            &cursor->error)) {
      goto failure;
   }

   /* The reply must be an OP_REPLY answering our request. */
   if ((cursor->rpc.header.opcode != MONGOC_OPCODE_REPLY) ||
       (cursor->rpc.header.response_to != request_id)) {
      bson_set_error(&cursor->error,
                     MONGOC_ERROR_PROTOCOL,
                     MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                     "A reply to an invalid request id was received.");
      goto failure;
   }

   if (_mongoc_cursor_unwrap_failure(cursor)) {
      goto failure;
   }

   if (cursor->reader) {
      bson_reader_destroy(cursor->reader);
   }

   cursor->reader = bson_reader_new_from_data(cursor->rpc.reply.documents,
                                              cursor->rpc.reply.documents_len);

   cursor->end_of_event = FALSE;

   RETURN(TRUE);

failure:
   cursor->done = TRUE;
   cursor->failed = TRUE;

   RETURN(FALSE);
}
/*
 * Send the initial OP_QUERY for this cursor and receive its first reply,
 * pointing cursor->reader at the returned documents.  Returns TRUE on
 * success; marks the cursor failed/done and returns FALSE otherwise.
 */
static bson_bool_t
_mongoc_cursor_query (mongoc_cursor_t *cursor)
{
   bson_uint32_t hint;
   bson_uint32_t request_id;
   mongoc_rpc_t rpc;

   ENTRY;

   bson_return_val_if_fail(cursor, FALSE);

   if (!_mongoc_client_warm_up (cursor->client, &cursor->error)) {
      cursor->failed = TRUE;
      RETURN (FALSE);
   }

   rpc.query.msg_len = 0;
   rpc.query.request_id = 0;
   rpc.query.response_to = 0;
   rpc.query.opcode = MONGOC_OPCODE_QUERY;
   rpc.query.flags = cursor->flags;
   rpc.query.collection = cursor->ns;
   rpc.query.skip = cursor->skip;

   /* Tailable cursors let the server choose the batch size. */
   if ((cursor->flags & MONGOC_QUERY_TAILABLE_CURSOR)) {
      rpc.query.n_return = 0;
   } else {
      rpc.query.n_return = _mongoc_n_return(cursor);
   }

   rpc.query.query = bson_get_data(&cursor->query);
   rpc.query.fields = bson_get_data(&cursor->fields);

   /* hint identifies the server connection the query was sent on. */
   if (!(hint = _mongoc_client_sendv (cursor->client, &rpc, 1, 0, NULL,
                                      cursor->read_prefs,
                                      &cursor->error))) {
      goto failure;
   }

   cursor->hint = hint;
   request_id = BSON_UINT32_FROM_LE(rpc.header.request_id);

   _mongoc_buffer_clear(&cursor->buffer, FALSE);

   if (!_mongoc_client_recv(cursor->client,
                            &cursor->rpc,
                            &cursor->buffer,
                            hint,
                            &cursor->error)) {
      goto failure;
   }

   /* The reply must be an OP_REPLY answering this exact request. */
   if ((cursor->rpc.header.opcode != MONGOC_OPCODE_REPLY) ||
       (cursor->rpc.header.response_to != request_id)) {
      bson_set_error(&cursor->error,
                     MONGOC_ERROR_PROTOCOL,
                     MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                     "A reply to an invalid request id was received.");
      goto failure;
   }

   if (_mongoc_cursor_unwrap_failure(cursor)) {
      /* A tailable query on a non-capped collection cannot recover. */
      if ((cursor->error.domain == MONGOC_ERROR_QUERY) &&
          (cursor->error.code == MONGOC_ERROR_QUERY_NOT_TAILABLE)) {
         cursor->failed = TRUE;
      }
      goto failure;
   }

   if (cursor->reader) {
      bson_reader_destroy(cursor->reader);
   }

   cursor->reader = bson_reader_new_from_data(cursor->rpc.reply.documents,
                                              cursor->rpc.reply.documents_len);

   if (cursor->flags & MONGOC_QUERY_EXHAUST) {
      cursor->in_exhaust = TRUE;
      cursor->client->in_exhaust = TRUE;
   }

   cursor->done = FALSE;
   cursor->end_of_event = FALSE;
   cursor->sent = TRUE;
   RETURN(TRUE);

failure:
   cursor->failed = TRUE;
   cursor->done = TRUE;

   RETURN(FALSE);
}
/*
 * Legacy mock-server connection loop: read one wire message at a time,
 * scatter it into an RPC, and dispatch to the built-in command handler or
 * the server's user-supplied handler.  Runs until a read fails, then tears
 * the connection down.
 *
 * NOTE(review): the failure path does not call _mongoc_buffer_destroy on
 * `buffer` — looks like a leak; confirm against _mongoc_buffer_init's
 * ownership semantics.
 */
static void *
mock_server_worker (void *data)
{
   mongoc_buffer_t buffer;
   mongoc_stream_t *stream;
   mock_server_t *server;
   mongoc_rpc_t rpc;
   bson_error_t error;
   int32_t msg_len;
   void **closure = data;

   BSON_ASSERT(closure);

   /* closure is a two-element array: [server, stream]. */
   server = closure[0];
   stream = closure[1];

   _mongoc_buffer_init(&buffer, NULL, 0, NULL);

again:
   if (_mongoc_buffer_fill (&buffer, stream, 4, INT_MAX, &error) == -1) {
      MONGOC_WARNING ("%s", error.message);
      goto failure;
   }

   assert (buffer.len >= 4);

   /* First 4 bytes are the little-endian total message length. */
   memcpy(&msg_len, buffer.data + buffer.off, 4);
   msg_len = BSON_UINT32_FROM_LE(msg_len);

   if (msg_len < 16) {
      MONGOC_WARNING ("No data");
      goto failure;
   }

   if (_mongoc_buffer_fill (&buffer, stream, msg_len, INT_MAX, &error) == -1) {
      MONGOC_WARNING ("%s", error.message);
      goto failure;
   }

   assert (buffer.len >= msg_len);

   DUMP_BYTES (buffer, buffer.data + buffer.off, buffer.len);

   if (!_mongoc_rpc_scatter(&rpc, buffer.data + buffer.off, msg_len)) {
      MONGOC_WARNING ("Failed to scatter");
      goto failure;
   }

   _mongoc_rpc_swab_from_le(&rpc);

   /* Built-in handler first (ismaster/ping), then the user handler. */
   if (!handle_command(server, stream, &rpc)) {
      server->handler(server, stream, &rpc, server->handler_data);
   }

   /* Shift any bytes of the next message to the buffer front. */
   memmove (buffer.data, buffer.data + buffer.off + msg_len,
            buffer.len - msg_len);
   buffer.off = 0;
   buffer.len -= msg_len;

   goto again;

failure:
   mongoc_stream_close(stream);
   mongoc_stream_destroy(stream);
   bson_free(closure);

   return NULL;
}
/*
 * Execute a write command (insert/update/delete) over OP_MSG, batching the
 * payload-type-1 documents so each message stays within the server's
 * maxMessageSizeBytes and maxWriteBatchSize limits.  Retryable writes are
 * retried at most once on a freshly selected stream.  Outcomes are merged
 * into *result and *error.
 */
static void
_mongoc_write_opmsg (mongoc_write_command_t *command,
                     mongoc_client_t *client,
                     mongoc_server_stream_t *server_stream,
                     const char *database,
                     const char *collection,
                     const mongoc_write_concern_t *write_concern,
                     uint32_t index_offset,
                     mongoc_client_session_t *cs,
                     mongoc_write_result_t *result,
                     bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   bson_t cmd;
   bson_t reply;
   bool ret = false;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   int32_t max_document_count;
   uint32_t header;
   uint32_t payload_batch_size = 0;
   uint32_t payload_total_offset = 0;
   bool ship_it = false;
   int document_count = 0;
   int32_t len;
   mongoc_server_stream_t *retry_server_stream = NULL;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

/* MongoDB has a extra allowance to allow updating 16mb document,
 * as the update operators would otherwise overflow the 16mb object limit
 */
#define BSON_OBJECT_ALLOWANCE (16 * 1024)
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);
   max_document_count =
      mongoc_server_stream_max_write_batch_size (server_stream);

   bson_init (&cmd);
   _mongoc_write_command_init (&cmd, command, collection);
   mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd);
   parts.assembled.operation_id = command->operation_id;
   parts.is_write_command = true;

   if (!mongoc_cmd_parts_set_write_concern (
          &parts, write_concern, server_stream->sd->max_wire_version, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   if (parts.assembled.is_acknowledged) {
      mongoc_cmd_parts_set_session (&parts, cs);
   }

   /* Write commands that include multi-document operations are not retryable.
    * Set this explicitly so that mongoc_cmd_parts_assemble does not need to
    * inspect the command body later. */
   parts.allow_txn_number =
      (command->flags.has_multi_write || !parts.assembled.is_acknowledged)
         ? MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_NO
         : MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES;

   BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts));
   if (!mongoc_cmd_parts_append_opts (
          &parts, &iter, server_stream->sd->max_wire_version, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   /*
    * OP_MSG header == 16 byte
    * + 4 bytes flagBits
    * + 1 byte payload type = 1
    * + 1 byte payload type = 2
    * + 4 byte size of payload
    * == 26 bytes opcode overhead
    * + X Full command document {insert: "test", writeConcern: {...}}
    * + Y command identifier ("documents", "deletes", "updates") ( + \0)
    */
   header =
      26 + parts.assembled.command->len + gCommandFieldLens[command->type] + 1;

   do {
      /* Each payload document starts with its little-endian length. */
      memcpy (&len,
              command->payload.data + payload_batch_size +
                 payload_total_offset,
              4);
      len = BSON_UINT32_FROM_LE (len);

      if (len > max_bson_obj_size + BSON_OBJECT_ALLOWANCE) {
         /* Quit if the document is too large */
         _mongoc_write_command_too_large_error (
            error, index_offset, len, max_bson_obj_size);
         result->failed = true;
         break;

      } else if ((payload_batch_size + header) + len <= max_msg_size) {
         /* The current batch is still under max batch size in bytes */
         payload_batch_size += len;

         /* If this document filled the maximum document count */
         if (++document_count == max_document_count) {
            ship_it = true;
            /* If this document is the last document we have */
         } else if (payload_batch_size + payload_total_offset ==
                    command->payload.len) {
            ship_it = true;
         } else {
            ship_it = false;
         }
      } else {
         ship_it = true;
      }

      if (ship_it) {
         bool is_retryable = parts.is_retryable_write;
         mongoc_write_err_type_t error_type;

         /* Seek past the document offset we have already sent */
         parts.assembled.payload = command->payload.data + payload_total_offset;
         /* Only send the documents up to this size */
         parts.assembled.payload_size = payload_batch_size;
         parts.assembled.payload_identifier = gCommandFields[command->type];

         /* increment the transaction number for the first attempt of each
          * retryable write command */
         if (is_retryable) {
            bson_iter_t txn_number_iter;
            BSON_ASSERT (bson_iter_init_find (
               &txn_number_iter, parts.assembled.command, "txnNumber"));
            bson_iter_overwrite_int64 (
               &txn_number_iter,
               ++parts.assembled.session->server_session->txn_number);
         }
      retry:
         ret = mongoc_cluster_run_command_monitored (
            &client->cluster, &parts.assembled, &reply, error);

         /* Add this batch size so we skip these documents next time */
         payload_total_offset += payload_batch_size;
         payload_batch_size = 0;

         /* If a retryable error is encountered and the write is retryable,
          * select a new writable stream and retry. If server selection fails or
          * the selected server does not support retryable writes, fall through
          * and allow the original error to be reported. */
         error_type = _mongoc_write_error_get_type (ret, error, &reply);
         if (is_retryable && error_type == MONGOC_WRITE_ERR_RETRY) {
            bson_error_t ignored_error;

            /* each write command may be retried at most once */
            is_retryable = false;

            if (retry_server_stream) {
               mongoc_server_stream_cleanup (retry_server_stream);
            }

            retry_server_stream = mongoc_cluster_stream_for_writes (
               &client->cluster, cs, NULL, &ignored_error);

            if (retry_server_stream &&
                retry_server_stream->sd->max_wire_version >=
                   WIRE_VERSION_RETRY_WRITES) {
               parts.assembled.server_stream = retry_server_stream;
               bson_destroy (&reply);
               GOTO (retry);
            }
         }

         if (!ret) {
            result->failed = true;
            result->must_stop = true;
         }

         /* Result merge needs to know the absolute index for a document
          * so it can rewrite the error message which contains the relative
          * document index per batch */
         _mongoc_write_result_merge (result, command, &reply, index_offset);
         index_offset += document_count;
         document_count = 0;
         bson_destroy (&reply);
      }
      /* While we have more documents to write */
   } while (payload_total_offset < command->payload.len);

   bson_destroy (&cmd);
   mongoc_cmd_parts_cleanup (&parts);

   if (retry_server_stream) {
      mongoc_server_stream_cleanup (retry_server_stream);
   }

   if (ret) {
      /* if a retry succeeded, clear the initial error */
      memset (&result->error, 0, sizeof (bson_error_t));
   }

   EXIT;
}
/*
 * Validate that the file `name` contains a single, well-formed MongoDB
 * wire-protocol message.  Exits the process on any validation failure.
 *
 * Fixes: the stream was leaked on the success path (mongoc_stream_destroy
 * was never called), and malloc'd memory was released with bson_free —
 * an allocator mismatch if a bson memory vtable is installed; the buffer
 * is now released with free() to match malloc().
 */
static void
validate (const char *name) /* IN */
{
   mongoc_stream_t *stream;
   mongoc_rpc_t rpc;
#ifdef _WIN32
   struct _stat st;
#else
   struct stat st;
#endif
   uint8_t *buf;
   int32_t len;
   int ret;

   stream = mongoc_stream_file_new_for_path (name, O_RDONLY, 0);
   if (!stream) {
      perror ("failed to open file");
      exit (EXIT_FAILURE);
   }

#ifdef _WIN32
   ret = _stat (name, &st);
#else
   ret = stat (name, &st);
#endif

   if (ret != 0) {
      perror ("failed to stat() file.");
      exit (EXIT_FAILURE);
   }

   /* Reject absurd sizes before allocating. */
   if ((st.st_size > (100 * 1024 * 1024)) || (st.st_size < 16)) {
      fprintf (stderr, "%s: unreasonable message size\n", name);
      exit (EXIT_FAILURE);
   }

   buf = malloc (st.st_size);
   if (buf == NULL) {
      fprintf (stderr, "%s: Failed to malloc %d bytes.\n",
               name, (int)st.st_size);
      exit (EXIT_FAILURE);
   }

   if (st.st_size != mongoc_stream_read (stream, buf, st.st_size,
                                         st.st_size, -1)) {
      fprintf (stderr, "%s: Failed to read %d bytes into buffer.\n",
               name, (int)st.st_size);
      exit (EXIT_FAILURE);
   }

   /* The message's length prefix must declare exactly the file's size. */
   memcpy (&len, buf, 4);
   len = BSON_UINT32_FROM_LE (len);
   if (len != st.st_size) {
      fprintf (stderr, "%s is invalid. Invalid Length.\n", name);
      exit (EXIT_FAILURE);
   }

   if (!_mongoc_rpc_scatter (&rpc, buf, st.st_size)) {
      fprintf (stderr, "%s is invalid. Invalid Format.\n", name);
      exit (EXIT_FAILURE);
   }

   fprintf (stdout, "%s is valid.\n", name);

   free (buf);                     /* matches malloc() above */
   mongoc_stream_destroy (stream); /* fix: stream was leaked */
}
/*
 * _mongoc_cursor_query --
 *
 *    Send the initial OP_QUERY for @cursor to a server and receive the
 *    matching OP_REPLY. On success a bson_reader over the returned
 *    documents is installed on the cursor and true is returned; on any
 *    failure cursor->error is set, the cursor is marked failed/done, and
 *    false is returned.
 */
static bool
_mongoc_cursor_query (mongoc_cursor_t *cursor)
{
   mongoc_rpc_t rpc;
   uint32_t hint;
   uint32_t request_id;

   ENTRY;

   bson_return_val_if_fail (cursor, false);

   /* make sure the cluster has connected/authenticated streams */
   if (!_mongoc_client_warm_up (cursor->client, &cursor->error)) {
      cursor->failed = true;
      RETURN (false);
   }

   /* build the OP_QUERY; msg_len/request_id are filled in by the
    * sending layer */
   rpc.query.msg_len = 0;
   rpc.query.request_id = 0;
   rpc.query.response_to = 0;
   rpc.query.opcode = MONGOC_OPCODE_QUERY;
   rpc.query.flags = cursor->flags;
   rpc.query.collection = cursor->ns;
   rpc.query.skip = cursor->skip;
   if ((cursor->flags & MONGOC_QUERY_TAILABLE_CURSOR)) {
      /* tailable cursors let the server decide the batch size */
      rpc.query.n_return = 0;
   } else {
      rpc.query.n_return = _mongoc_n_return(cursor);
   }
   rpc.query.query = bson_get_data(&cursor->query);
   if (cursor->has_fields) {
      rpc.query.fields = bson_get_data (&cursor->fields);
   } else {
      rpc.query.fields = NULL;
   }

   /* a zero return means no server could be selected / send failed */
   if (!(hint = _mongoc_client_sendv (cursor->client, &rpc, 1, cursor->hint,
                                      NULL, cursor->read_prefs,
                                      &cursor->error))) {
      GOTO (failure);
   }

   /* pin the cursor to the server that received the query */
   cursor->hint = hint;

   /* sendv swabbed the header to little-endian in place; convert the
    * request id back to host order so we can match the reply against it */
   request_id = BSON_UINT32_FROM_LE(rpc.header.request_id);

   _mongoc_buffer_clear(&cursor->buffer, false);

   if (!_mongoc_client_recv(cursor->client,
                            &cursor->rpc,
                            &cursor->buffer,
                            hint,
                            &cursor->error)) {
      GOTO (failure);
   }

   if (cursor->rpc.header.opcode != MONGOC_OPCODE_REPLY) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_PROTOCOL,
                      MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                      "Invalid opcode. Expected %d, got %d.",
                      MONGOC_OPCODE_REPLY, cursor->rpc.header.opcode);
      GOTO (failure);
   }

   /* the reply must correlate to the request we just sent */
   if (cursor->rpc.header.response_to != request_id) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_PROTOCOL,
                      MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                      "Invalid response_to. Expected %d, got %d.",
                      request_id, cursor->rpc.header.response_to);
      GOTO (failure);
   }

   /* translate server-side query failures into cursor->error */
   if (_mongoc_cursor_unwrap_failure(cursor)) {
      if ((cursor->error.domain == MONGOC_ERROR_QUERY) &&
          (cursor->error.code == MONGOC_ERROR_QUERY_NOT_TAILABLE)) {
         cursor->failed = true;
      }
      GOTO (failure);
   }

   /* replace any reader from a previous event with one over the new
    * reply documents */
   if (cursor->reader) {
      bson_reader_destroy(cursor->reader);
   }

   cursor->reader = bson_reader_new_from_data(cursor->rpc.reply.documents,
                                              cursor->rpc.reply.documents_len);

   if ((cursor->flags & MONGOC_QUERY_EXHAUST)) {
      /* exhaust mode monopolizes the client's connection */
      cursor->in_exhaust = true;
      cursor->client->in_exhaust = true;
   }

   cursor->done = false;
   cursor->end_of_event = false;
   cursor->sent = true;
   RETURN (true);

failure:
   cursor->failed = true;
   cursor->done = true;
   RETURN (false);
}
/*
 * test_bson_iter_fuzz --
 *
 *    Fuzz the bson iterator: for FUZZ_N_PASSES rounds, build a 512-byte
 *    buffer whose length prefix is valid but whose remaining bytes are
 *    pseudo-random, then iterate over any buffer that bson_new_from_data()
 *    accepts and assert the iterator never reports values that escape the
 *    buffer bounds.
 */
static void
test_bson_iter_fuzz (void)
{
   uint8_t *data;
   uint32_t len = 512;
   uint32_t len_le;
   uint32_t r;
   bson_iter_t iter;
   bson_t *b;
   uint32_t i;
   int pass;

   len_le = BSON_UINT32_TO_LE(len);

   for (pass = 0; pass < FUZZ_N_PASSES; pass++) {
      /* valid little-endian length prefix followed by random 4-byte words */
      data = bson_malloc0(len);
      memcpy(data, &len_le, sizeof (len_le));

      for (i = 4; i < len; i += 4) {
         r = rand();
         memcpy(&data[i], &r, sizeof (r));
      }

      if (!(b = bson_new_from_data(data, len))) {
         /*
          * It could fail on buffer length or missing trailing null byte.
          */
         bson_free (data);
         continue;
      }

      BSON_ASSERT(b);

      /*
       * TODO: Most of the following ignores the key. That should be fixed
       *       but has its own perils too.
       */

      assert(bson_iter_init(&iter, b));
      while (bson_iter_next(&iter)) {
         /* the iterator must never advance past the end of the buffer */
         assert(iter.next_off < len);
         switch (bson_iter_type(&iter)) {
         case BSON_TYPE_ARRAY:
         case BSON_TYPE_DOCUMENT:
            {
               const uint8_t *child = NULL;
               uint32_t child_len = 0;

               bson_iter_document(&iter, &child_len, &child);
               if (child_len) {
                  /* a reported subdocument must be at least an empty
                   * document (5 bytes) and lie inside its parent */
                  assert(child);
                  assert(child_len >= 5);
                  assert((iter.off + child_len) < b->len);
                  assert(child_len < (uint32_t) -1);

                  /* the child's own embedded length prefix must also be
                   * at least the empty-document minimum */
                  memcpy(&child_len, child, sizeof (child_len));
                  child_len = BSON_UINT32_FROM_LE(child_len);
                  assert(child_len >= 5);
               }
            }
            break;
         case BSON_TYPE_DOUBLE:
         case BSON_TYPE_UTF8:
         case BSON_TYPE_BINARY:
         case BSON_TYPE_UNDEFINED:
            break;
         case BSON_TYPE_OID:
            /* a 12-byte ObjectId must fit within the iterator's buffer */
            assert(iter.off + 12 < iter.len);
            break;
         case BSON_TYPE_BOOL:
         case BSON_TYPE_DATE_TIME:
         case BSON_TYPE_NULL:
         case BSON_TYPE_REGEX:
            /* TODO: check for 2 valid cstring. */
         case BSON_TYPE_DBPOINTER:
         case BSON_TYPE_CODE:
         case BSON_TYPE_SYMBOL:
         case BSON_TYPE_CODEWSCOPE:
         case BSON_TYPE_INT32:
         case BSON_TYPE_TIMESTAMP:
         case BSON_TYPE_INT64:
         case BSON_TYPE_DECIMAL128:
         case BSON_TYPE_MAXKEY:
         case BSON_TYPE_MINKEY:
            break;
         case BSON_TYPE_EOD:
         default:
            /* Code should not be reached. */
            assert(false);
            break;
         }
      }

      bson_destroy(b);
      bson_free(data);
   }
}
/*
 * _mongoc_async_cmd_phase_recv_rpc --
 *
 *    Continue receiving the body of a server reply during async command
 *    execution. Returns IN_PROGRESS until acmd->bytes_to_read bytes have
 *    arrived; then scatters the buffer into an RPC, decompresses an
 *    OP_COMPRESSED reply if necessary, byte-swaps the RPC to host order,
 *    and extracts the first reply document into acmd->reply.
 *    Returns SUCCESS / ERROR accordingly, setting acmd->error on failure.
 */
mongoc_async_cmd_result_t
_mongoc_async_cmd_phase_recv_rpc (mongoc_async_cmd_t *acmd)
{
   ssize_t bytes = _mongoc_buffer_try_append_from_stream (
      &acmd->buffer, acmd->stream, acmd->bytes_to_read, 0);

   /* transient would-block condition: poll again later */
   if (bytes <= 0 && mongoc_stream_should_retry (acmd->stream)) {
      return MONGOC_ASYNC_CMD_IN_PROGRESS;
   }

   if (bytes < 0) {
      bson_set_error (&acmd->error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "Failed to receive rpc bytes from server.");
      return MONGOC_ASYNC_CMD_ERROR;
   }

   /* orderly shutdown by the peer before the full reply arrived */
   if (bytes == 0) {
      bson_set_error (&acmd->error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "Server closed connection.");
      return MONGOC_ASYNC_CMD_ERROR;
   }

   acmd->bytes_to_read = (size_t) (acmd->bytes_to_read - bytes);

   if (!acmd->bytes_to_read) {
      /* full message buffered: map it onto the rpc structure */
      if (!_mongoc_rpc_scatter (
             &acmd->rpc, acmd->buffer.data, acmd->buffer.len)) {
         bson_set_error (&acmd->error,
                         MONGOC_ERROR_PROTOCOL,
                         MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                         "Invalid reply from server.");
         return MONGOC_ASYNC_CMD_ERROR;
      }

      /* header fields are still wire (little-endian) order here */
      if (BSON_UINT32_FROM_LE (acmd->rpc.header.opcode) ==
          MONGOC_OPCODE_COMPRESSED) {
         uint8_t *buf = NULL;
         /* NOTE(review): uncompressed_size is read straight off the wire
          * and is not bounded before this allocation — confirm an upper
          * bound (e.g. max message size) is enforced by the caller. */
         size_t len =
            BSON_UINT32_FROM_LE (acmd->rpc.compressed.uncompressed_size) +
            sizeof (mongoc_rpc_header_t);

         buf = bson_malloc0 (len);
         if (!_mongoc_rpc_decompress (&acmd->rpc, buf, len)) {
            bson_free (buf);
            bson_set_error (&acmd->error,
                            MONGOC_ERROR_PROTOCOL,
                            MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                            "Could not decompress server reply");
            return MONGOC_ASYNC_CMD_ERROR;
         }

         /* hand ownership of the decompressed bytes to acmd->buffer;
          * acmd->rpc now points into this new storage */
         _mongoc_buffer_destroy (&acmd->buffer);
         _mongoc_buffer_init (&acmd->buffer, buf, len, NULL, NULL);
      }

      _mongoc_rpc_swab_from_le (&acmd->rpc);

      if (!_mongoc_rpc_get_first_document (&acmd->rpc, &acmd->reply)) {
         bson_set_error (&acmd->error,
                         MONGOC_ERROR_PROTOCOL,
                         MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                         "Invalid reply from server");
         return MONGOC_ASYNC_CMD_ERROR;
      }

      /* acmd->reply now owns/borrows reply storage; caller must clean up */
      acmd->reply_needs_cleanup = true;

      return MONGOC_ASYNC_CMD_SUCCESS;
   }

   return MONGOC_ASYNC_CMD_IN_PROGRESS;
}
/* fire command-succeeded event as if we'd used a modern write command.
 * note, cluster.request_id was incremented once for the write, again
 * for the getLastError, so cluster.request_id is no longer valid; used the
 * passed-in request_id instead. */
static void
_mongoc_monitor_legacy_write_succeeded (mongoc_client_t *client,
                                        int64_t duration,
                                        mongoc_write_command_t *command,
                                        const bson_t *gle,
                                        mongoc_server_stream_t *stream,
                                        int64_t request_id)
{
   bson_iter_t iter;
   bson_t doc;

   /* fields extracted from the getLastError reply, with defaults used
    * when the reply omits them */
   int64_t ok = 1;
   int64_t n = 0;
   uint32_t code = 8;
   bool wtimeout = false;

   /* server error message */
   const char *errmsg = NULL;
   size_t errmsg_len = 0;

   /* server errInfo subdocument */
   bool has_errinfo = false;
   uint32_t len;
   const uint8_t *data;
   bson_t errinfo;

   /* server upsertedId value */
   bool has_upserted_id = false;
   bson_value_t upserted_id;

   /* server updatedExisting value */
   bool has_updated_existing = false;
   bool updated_existing = false;

   mongoc_apm_command_succeeded_t event;

   ENTRY;

   /* nothing to do unless the application registered a callback */
   if (!client->apm_callbacks.succeeded) {
      EXIT;
   }

   /* first extract interesting fields from getlasterror response */
   if (gle) {
      bson_iter_init (&iter, gle);
      while (bson_iter_next (&iter)) {
         if (!strcmp (bson_iter_key (&iter), "ok")) {
            ok = bson_iter_as_int64 (&iter);
         } else if (!strcmp (bson_iter_key (&iter), "n")) {
            n = bson_iter_as_int64 (&iter);
         } else if (!strcmp (bson_iter_key (&iter), "code")) {
            code = (uint32_t) bson_iter_as_int64 (&iter);
            if (code == 0) {
               /* server sent non-numeric error code? */
               code = 8;
            }
         } else if (!strcmp (bson_iter_key (&iter), "upserted")) {
            /* deep-copy: the value must outlive the gle iterator */
            has_upserted_id = true;
            bson_value_copy (bson_iter_value (&iter), &upserted_id);
         } else if (!strcmp (bson_iter_key (&iter), "updatedExisting")) {
            has_updated_existing = true;
            updated_existing = bson_iter_as_bool (&iter);
         } else if ((!strcmp (bson_iter_key (&iter), "err") ||
                     !strcmp (bson_iter_key (&iter), "errmsg")) &&
                    BSON_ITER_HOLDS_UTF8 (&iter)) {
            /* errmsg points into gle's storage; only valid while gle lives */
            errmsg = bson_iter_utf8_unsafe (&iter, &errmsg_len);
         } else if (!strcmp (bson_iter_key (&iter), "errInfo") &&
                    BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            bson_iter_document (&iter, &len, &data);
            bson_init_static (&errinfo, data, len);
            has_errinfo = true;
         } else if (!strcmp (bson_iter_key (&iter), "wtimeout")) {
            wtimeout = true;
         }
      }
   }

   /* based on PyMongo's _convert_write_result() */
   bson_init (&doc);
   bson_append_int32 (&doc, "ok", 2, (int32_t) ok);

   if (errmsg && !wtimeout) {
      /* Failure, but pass to the success callback. Command Monitoring Spec:
       * "Commands that executed on the server and return a status of {ok: 1}
       * are considered successful commands and fire CommandSucceededEvent.
       * Commands that have write errors are included since the actual command
       * did succeed, only writes failed." */
      append_write_err (
         &doc, code, errmsg, errmsg_len, has_errinfo ? &errinfo : NULL);
   } else {
      /* Success, perhaps with a writeConcernError. */
      if (errmsg) {
         append_write_concern_err (&doc, errmsg, errmsg_len);
      }

      if (command->type == MONGOC_WRITE_COMMAND_INSERT) {
         /* GLE result for insert is always 0 in most MongoDB versions. */
         n = command->n_documents;
      } else if (command->type == MONGOC_WRITE_COMMAND_UPDATE) {
         if (has_upserted_id) {
            append_upserted (&doc, &upserted_id);
         } else if (has_updated_existing && !updated_existing && n == 1) {
            /* an upsert that created a document but where the server did
             * not report "upserted": recover the _id from the first update
             * document in the command payload */
            bson_t tmp;
            int32_t bson_len = 0;

            memcpy (&bson_len, command->payload.data, 4);
            bson_len = BSON_UINT32_FROM_LE (bson_len);
            bson_init_static (&tmp, command->payload.data, bson_len);
            has_upserted_id = get_upserted_id (&tmp, &upserted_id);

            if (has_upserted_id) {
               append_upserted (&doc, &upserted_id);
            }
         }
      }
   }

   bson_append_int32 (&doc, "n", 1, (int32_t) n);

   mongoc_apm_command_succeeded_init (
      &event,
      duration,
      &doc,
      _mongoc_command_type_to_name (command->type),
      request_id,
      command->operation_id,
      &stream->sd->host,
      stream->sd->id,
      client->apm_context);

   client->apm_callbacks.succeeded (&event);

   mongoc_apm_command_succeeded_cleanup (&event);
   bson_destroy (&doc);

   /* upserted_id was deep-copied above and must be released */
   if (has_upserted_id) {
      bson_value_destroy (&upserted_id);
   }

   EXIT;
}
/*
 * mock_server_worker --
 *
 *    Per-connection thread body for the mock server. @data is a two-slot
 *    closure: closure[0] is the mock_server_t, closure[1] the client
 *    stream. Loops reading one wire-protocol message at a time,
 *    dispatching it to handle_command() (falling back to the server's
 *    registered handler), until a read error ends the connection.
 *    Always returns NULL.
 */
static void *
mock_server_worker (void *data)
{
   mongoc_buffer_t buffer;
   mongoc_stream_t *stream;
   mock_server_t *server;
   mongoc_rpc_t rpc;
   bson_error_t error;
   int32_t msg_len;
   void **closure = data;

   ENTRY;

   BSON_ASSERT(closure);

   server = closure[0];
   stream = closure[1];

   _mongoc_buffer_init(&buffer, NULL, 0, NULL, NULL);

again:
   /* read at least the 4-byte length prefix */
   if (_mongoc_buffer_fill (&buffer, stream, 4, -1, &error) == -1) {
      MONGOC_WARNING ("%s():%d: %s", __FUNCTION__, __LINE__, error.message);
      GOTO (failure);
   }

   assert (buffer.len >= 4);

   memcpy (&msg_len, buffer.data + buffer.off, 4);
   msg_len = BSON_UINT32_FROM_LE (msg_len);

   /* 16 bytes is the minimum size of a wire-protocol header */
   if (msg_len < 16) {
      MONGOC_WARNING ("No data");
      GOTO (failure);
   }

   /* read the rest of the message */
   if (_mongoc_buffer_fill (&buffer, stream, msg_len, -1, &error) == -1) {
      MONGOC_WARNING ("%s():%d: %s", __FUNCTION__, __LINE__, error.message);
      GOTO (failure);
   }

   assert (buffer.len >= (unsigned)msg_len);

   DUMP_BYTES (buffer, buffer.data + buffer.off, buffer.len);

   if (!_mongoc_rpc_scatter(&rpc, buffer.data + buffer.off, msg_len)) {
      MONGOC_WARNING ("%s():%d: %s", __FUNCTION__, __LINE__,
                      "Failed to scatter");
      GOTO (failure);
   }

   _mongoc_rpc_swab_from_le(&rpc);

   /* built-in command handling first; otherwise the user handler */
   if (!handle_command(server, stream, &rpc)) {
      server->handler(server, stream, &rpc, server->handler_data);
   }

   /* compact: shift any bytes after the consumed message to the front.
    * NOTE(review): the move length is buffer.len - msg_len rather than
    * buffer.len - buffer.off - msg_len — this looks correct only if
    * buffer.off is always 0 here; confirm against _mongoc_buffer_fill's
    * contract. */
   memmove (buffer.data, buffer.data + buffer.off + msg_len,
            buffer.len - msg_len);
   buffer.off = 0;
   buffer.len -= msg_len;

   GOTO (again);

failure:
   mongoc_stream_close (stream);
   mongoc_stream_destroy (stream);
   bson_free(closure);
   _mongoc_buffer_destroy (&buffer);

   RETURN (NULL);
}
/**
 * Performs a find query on a MongoDB collection, selecting certain fields from
 * the results and storing them in Monary columns.
 *
 * @param collection The MongoDB collection to query against.
 * @param offset The number of documents to skip, or zero.
 * @param limit The maximum number of documents to return, or zero.
 * @param query A pointer to a BSON buffer representing the query.
 * @param coldata The column data to store the results in.
 * @param select_fields If truthy, select exactly the fields from the database
 * that match the fields in coldata. If false, the query will find and return
 * all fields from matching documents.
 *
 * @return If successful, a Monary cursor that should be freed with
 * monary_close_query() when no longer in use. If unsuccessful, or if an
 * invalid query was passed in, NULL is returned.
 */
monary_cursor* monary_init_query(mongoc_collection_t* collection,
                                 uint32_t offset,
                                 uint32_t limit,
                                 const uint8_t* query,
                                 monary_column_data* coldata,
                                 int select_fields)
{
    bson_t query_bson;          // BSON representing the query to perform
    bson_t* fields_bson;        // BSON holding the fields to select
    int32_t query_size;
    monary_cursor* cursor;
    mongoc_cursor_t* mcursor;   // A MongoDB cursor

    // Sanity checks
    if (!collection || !query || !coldata) {
        DEBUG("%s", "Given a NULL param.");
        return NULL;
    }

    // build BSON query data: the buffer's own little-endian length prefix
    // tells us how many bytes the query document spans
    memcpy(&query_size, query, sizeof(int32_t));
    query_size = (int32_t) BSON_UINT32_FROM_LE(query_size);
    if (!bson_init_static(&query_bson, query, query_size)) {
        DEBUG("%s", "Failed to initialize raw BSON query");
        return NULL;
    }
    fields_bson = NULL;

    // build BSON fields list (if necessary)
    if(select_fields) {
        fields_bson = bson_new();
        if (!fields_bson) {
            DEBUG("%s", "An error occurred while allocating memory for BSON data");
            return NULL;
        }
        monary_get_bson_fields_list(coldata, fields_bson);
    }

    // create query cursor
    mcursor = mongoc_collection_find(collection,
                                     MONGOC_QUERY_NONE,
                                     offset,
                                     limit,
                                     0,
                                     &query_bson,
                                     fields_bson,
                                     NULL);

    // destroy BSON fields
    bson_destroy(&query_bson);
    if(fields_bson) {
        bson_destroy(fields_bson);
    }

    if (!mcursor) {
        DEBUG("%s", "An error occurred with the query");
        return NULL;
    }

    // finally, create a new Monary cursor; previously the malloc() result
    // was dereferenced unchecked, crashing on allocation failure and
    // leaking mcursor
    cursor = (monary_cursor*) malloc(sizeof(monary_cursor));
    if (!cursor) {
        DEBUG("%s", "Failed to allocate memory for the Monary cursor");
        mongoc_cursor_destroy(mcursor);
        return NULL;
    }
    cursor->mcursor = mcursor;
    cursor->coldata = coldata;
    return cursor;
}