const char *
mongoc_uri_get_database (const mongoc_uri_t *uri)
{
   BSON_ASSERT (uri);

   return uri->database;
}
mongoc_collection_t *
mongoc_database_create_collection (mongoc_database_t *database,
                                   const char *name,
                                   const bson_t *options,
                                   bson_error_t *error)
{
   mongoc_collection_t *collection = NULL;
   bson_iter_t iter;
   bson_t cmd;
   bool capped = false;

   BSON_ASSERT (database);
   BSON_ASSERT (name);

   if (strchr (name, '$')) {
      bson_set_error (error,
                      MONGOC_ERROR_NAMESPACE,
                      MONGOC_ERROR_NAMESPACE_INVALID,
                      "The namespace \"%s\" is invalid.",
                      name);
      return NULL;
   }

   if (options) {
      if (bson_iter_init_find (&iter, options, "capped")) {
         if (!BSON_ITER_HOLDS_BOOL (&iter)) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "The argument \"capped\" must be a boolean.");
            return NULL;
         }
         capped = bson_iter_bool (&iter);
      }

      if (bson_iter_init_find (&iter, options, "autoIndexId") &&
          !BSON_ITER_HOLDS_BOOL (&iter)) {
         bson_set_error (error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_COMMAND_INVALID_ARG,
                         "The argument \"autoIndexId\" must be a boolean.");
         return NULL;
      }

      if (bson_iter_init_find (&iter, options, "size")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter)) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "The argument \"size\" must be an integer.");
            return NULL;
         }
         if (!capped) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "The \"size\" parameter requires {\"capped\": true}");
            return NULL;
         }
      }

      if (bson_iter_init_find (&iter, options, "max")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter)) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "The argument \"max\" must be an integer.");
            return NULL;
         }
         if (!capped) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "The \"max\" parameter requires {\"capped\": true}");
            return NULL;
         }
      }

      if (bson_iter_init_find (&iter, options, "storage")) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "The \"storage\" parameter must be a document");
            return NULL;
         }

         if (bson_iter_find (&iter, "wiredtiger")) {
            if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
               bson_set_error (error,
                               MONGOC_ERROR_COMMAND,
                               MONGOC_ERROR_COMMAND_INVALID_ARG,
                               "The \"wiredtiger\" option must take a document "
                               "argument with a \"configString\" field");
               return NULL;
            }
            if (bson_iter_find (&iter, "configString")) {
               if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
                  bson_set_error (error,
                                  MONGOC_ERROR_COMMAND,
                                  MONGOC_ERROR_COMMAND_INVALID_ARG,
                                  "The \"configString\" parameter must be a string");
                  return NULL;
               }
            } else {
               bson_set_error (error,
                               MONGOC_ERROR_COMMAND,
                               MONGOC_ERROR_COMMAND_INVALID_ARG,
                               "The \"wiredtiger\" option must take a document "
                               "argument with a \"configString\" field");
               return NULL;
            }
         }
      }
   }

   bson_init (&cmd);
   BSON_APPEND_UTF8 (&cmd, "create", name);

   if (options) {
      if (!bson_iter_init (&iter, options)) {
         bson_set_error (error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_COMMAND_INVALID_ARG,
                         "The argument \"options\" is corrupt or invalid.");
         bson_destroy (&cmd);
         return NULL;
      }

      while (bson_iter_next (&iter)) {
         if (!bson_append_iter (&cmd, bson_iter_key (&iter), -1, &iter)) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "Failed to append \"options\" to create command.");
            bson_destroy (&cmd);
            return NULL;
         }
      }
   }

   if (mongoc_database_command_simple (database, &cmd, NULL, NULL, error)) {
      collection = _mongoc_collection_new (database->client,
                                           database->name,
                                           name,
                                           database->read_prefs,
                                           database->read_concern,
                                           database->write_concern);
   }

   bson_destroy (&cmd);

   return collection;
}
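/* Usage sketch (not part of the driver): create a capped collection via the
 * options document validated above. Assumes a connected mongoc_client_t and
 * that mongoc.h and stdio.h are included; the names "test" and "events" are
 * illustrative. */
static void
example_create_capped_collection (mongoc_client_t *client)
{
   mongoc_database_t *db = mongoc_client_get_database (client, "test");
   bson_t options = BSON_INITIALIZER;
   bson_error_t error;
   mongoc_collection_t *coll;

   BSON_APPEND_BOOL (&options, "capped", true);
   BSON_APPEND_INT64 (&options, "size", 1048576); /* capped size in bytes */

   coll = mongoc_database_create_collection (db, "events", &options, &error);
   if (!coll) {
      fprintf (stderr, "create failed: %s\n", error.message);
   } else {
      mongoc_collection_destroy (coll);
   }

   bson_destroy (&options);
   mongoc_database_destroy (db);
}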
mongoc_stream_t *
mongoc_client_default_stream_initiator (const mongoc_uri_t *uri,
                                        const mongoc_host_list_t *host,
                                        void *user_data,
                                        bson_error_t *error)
{
   mongoc_stream_t *base_stream = NULL;
#ifdef MONGOC_ENABLE_SSL
   mongoc_client_t *client = (mongoc_client_t *) user_data;
   const bson_t *options;
   bson_iter_t iter;
   const char *mechanism;
   int32_t connecttimeoutms = MONGOC_DEFAULT_CONNECTTIMEOUTMS;
#endif

   BSON_ASSERT (uri);
   BSON_ASSERT (host);

#ifndef MONGOC_ENABLE_SSL
   if (mongoc_uri_get_ssl (uri)) {
      bson_set_error (error,
                      MONGOC_ERROR_CLIENT,
                      MONGOC_ERROR_CLIENT_NO_ACCEPTABLE_PEER,
                      "SSL is not enabled in this build of mongo-c-driver.");
      return NULL;
   }
#endif

   switch (host->family) {
#if defined(AF_INET6)
   case AF_INET6:
#endif
   case AF_INET:
      base_stream = mongoc_client_connect_tcp (uri, host, error);
      break;
   case AF_UNIX:
      base_stream = mongoc_client_connect_unix (uri, host, error);
      break;
   default:
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_INVALID_TYPE,
                      "Invalid address family: 0x%02x",
                      host->family);
      break;
   }

#ifdef MONGOC_ENABLE_SSL
   if (base_stream) {
      options = mongoc_uri_get_options (uri);
      mechanism = mongoc_uri_get_auth_mechanism (uri);

      if ((bson_iter_init_find_case (&iter, options, "ssl") &&
           bson_iter_as_bool (&iter)) ||
          (mechanism && (0 == strcmp (mechanism, "MONGODB-X509")))) {
         base_stream = mongoc_stream_tls_new (base_stream, &client->ssl_opts,
                                              true);
         if (!base_stream) {
            bson_set_error (error,
                            MONGOC_ERROR_STREAM,
                            MONGOC_ERROR_STREAM_SOCKET,
                            "Failed to initialize TLS state.");
            return NULL;
         }

         connecttimeoutms = mongoc_uri_get_option_as_int32 (
            uri, "connecttimeoutms", MONGOC_DEFAULT_CONNECTTIMEOUTMS);

         if (!mongoc_stream_tls_do_handshake (base_stream, connecttimeoutms) ||
             !mongoc_stream_tls_check_cert (base_stream, host->host)) {
            bson_set_error (error,
                            MONGOC_ERROR_STREAM,
                            MONGOC_ERROR_STREAM_SOCKET,
                            "Failed to handshake and validate TLS certificate.");
            mongoc_stream_destroy (base_stream);
            return NULL;
         }
      }
   }
#endif

   return base_stream ? mongoc_stream_buffered_new (base_stream, 1024) : NULL;
}
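/* Illustrative sketch: wrap the default initiator to log each connection
 * attempt before delegating to it. mongoc_client_set_stream_initiator is
 * public API; the wrapper name is hypothetical, and the default initiator
 * expects the mongoc_client_t itself as user_data. */
static mongoc_stream_t *
example_logging_initiator (const mongoc_uri_t *uri,
                           const mongoc_host_list_t *host,
                           void *user_data,
                           bson_error_t *error)
{
   MONGOC_INFO ("connecting to %s", host->host_and_port);
   return mongoc_client_default_stream_initiator (uri, host, user_data, error);
}

/* Registered with:
 *    mongoc_client_set_stream_initiator (client, example_logging_initiator,
 *                                        client);
 */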
static bool
mongoc_database_add_user_legacy (mongoc_database_t *database,
                                 const char *username,
                                 const char *password,
                                 bson_error_t *error)
{
   mongoc_collection_t *collection;
   mongoc_cursor_t *cursor = NULL;
   const bson_t *doc;
   bool ret = false;
   bson_t query;
   bson_t user;
   char *input;
   char *pwd = NULL;

   ENTRY;

   BSON_ASSERT (database);
   BSON_ASSERT (username);
   BSON_ASSERT (password);

   /*
    * Users are stored in the <dbname>.system.users virtual collection.
    */
   collection = mongoc_client_get_collection (database->client,
                                              database->name,
                                              "system.users");
   BSON_ASSERT (collection);

   /*
    * Hash the user's password.
    */
   input = bson_strdup_printf ("%s:mongo:%s", username, password);
   pwd = _mongoc_hex_md5 (input);
   bson_free (input);

   /*
    * Check to see if the user exists. If so, we will update the
    * password instead of inserting a new user.
    */
   bson_init (&query);
   bson_append_utf8 (&query, "user", 4, username, -1);
   cursor = mongoc_collection_find (collection, MONGOC_QUERY_NONE, 0, 1, 0,
                                    &query, NULL, NULL);
   if (!mongoc_cursor_next (cursor, &doc)) {
      if (mongoc_cursor_error (cursor, error)) {
         GOTO (failure);
      }
      bson_init (&user);
      bson_append_utf8 (&user, "user", 4, username, -1);
      bson_append_bool (&user, "readOnly", 8, false);
      bson_append_utf8 (&user, "pwd", 3, pwd, -1);
   } else {
      bson_init (&user);
      bson_copy_to_excluding_noinit (doc, &user, "pwd", (char *) NULL);
      bson_append_utf8 (&user, "pwd", 3, pwd, -1);
   }

   if (!mongoc_collection_save (collection, &user, NULL, error)) {
      GOTO (failure_with_user);
   }

   ret = true;

failure_with_user:
   bson_destroy (&user);

failure:
   if (cursor) {
      mongoc_cursor_destroy (cursor);
   }
   mongoc_collection_destroy (collection);
   bson_destroy (&query);
   bson_free (pwd);

   RETURN (ret);
}
const mongoc_read_prefs_t *
mongoc_database_get_read_prefs (const mongoc_database_t *database) /* IN */
{
   BSON_ASSERT (database);

   return database->read_prefs;
}
const bson_t *
mongoc_read_prefs_get_tags (const mongoc_read_prefs_t *read_prefs)
{
   BSON_ASSERT (read_prefs);

   return &read_prefs->tags;
}
const mongoc_write_concern_t *
mongoc_transaction_opts_get_write_concern (const mongoc_transaction_opt_t *opts)
{
   BSON_ASSERT (opts);

   return opts->write_concern;
}
static void
_mongoc_uri_build_write_concern (mongoc_uri_t *uri) /* IN */
{
   mongoc_write_concern_t *write_concern;
   const char *str;
   bson_iter_t iter;
   int32_t wtimeoutms;
   int value;

   BSON_ASSERT (uri);

   write_concern = mongoc_write_concern_new ();

   if (bson_iter_init_find_case (&iter, &uri->options, "safe") &&
       BSON_ITER_HOLDS_BOOL (&iter)) {
      mongoc_write_concern_set_w (write_concern,
                                  bson_iter_bool (&iter) ?
                                  1 : MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED);
   }

   wtimeoutms = mongoc_uri_get_option_as_int32 (uri, "wtimeoutms", 0);

   if (bson_iter_init_find_case (&iter, &uri->options, "journal") &&
       BSON_ITER_HOLDS_BOOL (&iter)) {
      mongoc_write_concern_set_journal (write_concern, bson_iter_bool (&iter));
   }

   if (bson_iter_init_find_case (&iter, &uri->options, "w")) {
      if (BSON_ITER_HOLDS_INT32 (&iter)) {
         value = bson_iter_int32 (&iter);

         switch (value) {
         case MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED:
         case MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED:
            /* Warn on conflict, since write concern will be validated later */
            if (mongoc_write_concern_get_journal (write_concern)) {
               MONGOC_WARNING ("Journal conflicts with w value [w=%d].", value);
            }
            mongoc_write_concern_set_w (write_concern, value);
            break;
         default:
            if (value > 0) {
               mongoc_write_concern_set_w (write_concern, value);
               if (value > 1) {
                  mongoc_write_concern_set_wtimeout (write_concern, wtimeoutms);
               }
               break;
            }
            MONGOC_WARNING ("Unsupported w value [w=%d].", value);
            break;
         }
      } else if (BSON_ITER_HOLDS_UTF8 (&iter)) {
         str = bson_iter_utf8 (&iter, NULL);

         if (0 == strcasecmp ("majority", str)) {
            mongoc_write_concern_set_wmajority (write_concern, wtimeoutms);
         } else {
            mongoc_write_concern_set_wtag (write_concern, str);
            mongoc_write_concern_set_wtimeout (write_concern, wtimeoutms);
         }
      } else {
         BSON_ASSERT (false);
      }
   }

   uri->write_concern = write_concern;
}
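/* How these URI options surface in practice: a rough sketch, assuming a
 * build where mongoc_uri_get_write_concern is available; the URI string is
 * illustrative. Parsing the URI below runs _mongoc_uri_build_write_concern
 * and yields a write concern with wmajority and a 1000 ms wtimeout. */
static void
example_write_concern_from_uri (void)
{
   mongoc_uri_t *uri = mongoc_uri_new (
      "mongodb://localhost/?w=majority&wtimeoutms=1000&journal=true");
   const mongoc_write_concern_t *wc = mongoc_uri_get_write_concern (uri);

   BSON_ASSERT (mongoc_write_concern_get_wmajority (wc));
   BSON_ASSERT (mongoc_write_concern_get_wtimeout (wc) == 1000);
   BSON_ASSERT (mongoc_write_concern_get_journal (wc));

   mongoc_uri_destroy (uri);
}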
static mongoc_stream_t *
mongoc_client_connect_tcp (const mongoc_uri_t *uri,
                           const mongoc_host_list_t *host,
                           bson_error_t *error)
{
   mongoc_socket_t *sock = NULL;
   struct addrinfo hints;
   struct addrinfo *result, *rp;
   int32_t connecttimeoutms = MONGOC_DEFAULT_CONNECTTIMEOUTMS;
   int64_t expire_at;
   const bson_t *options;
   bson_iter_t iter;
   char portstr[8];
   int s;

   ENTRY;

   bson_return_val_if_fail (uri, NULL);
   bson_return_val_if_fail (host, NULL);

   if ((options = mongoc_uri_get_options (uri)) &&
       bson_iter_init_find_case (&iter, options, "connecttimeoutms") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      if (!(connecttimeoutms = bson_iter_int32 (&iter))) {
         connecttimeoutms = MONGOC_DEFAULT_CONNECTTIMEOUTMS;
      }
   }

   BSON_ASSERT (connecttimeoutms);
   expire_at = bson_get_monotonic_time () + (connecttimeoutms * 1000L);

   bson_snprintf (portstr, sizeof portstr, "%hu", host->port);

   memset (&hints, 0, sizeof hints);
   hints.ai_family = host->family;
   hints.ai_socktype = SOCK_STREAM;
   hints.ai_flags = 0;
   hints.ai_protocol = 0;

   s = getaddrinfo (host->host, portstr, &hints, &result);

   if (s != 0) {
      mongoc_counter_dns_failure_inc ();
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_NAME_RESOLUTION,
                      "Failed to resolve %s",
                      host->host);
      RETURN (NULL);
   }

   mongoc_counter_dns_success_inc ();

   for (rp = result; rp; rp = rp->ai_next) {
      /*
       * Create a new non-blocking socket.
       */
      if (!(sock = mongoc_socket_new (rp->ai_family,
                                      rp->ai_socktype,
                                      rp->ai_protocol))) {
         continue;
      }

      /*
       * Try to connect to the peer.
       */
      if (0 != mongoc_socket_connect (sock,
                                      rp->ai_addr,
                                      (socklen_t) rp->ai_addrlen,
                                      expire_at)) {
         char *errmsg;
         char errmsg_buf[BSON_ERROR_BUFFER_SIZE];
         char ip[255];

         mongoc_socket_inet_ntop (rp, ip, sizeof ip);
         errmsg = bson_strerror_r (mongoc_socket_errno (sock),
                                   errmsg_buf, sizeof errmsg_buf);
         MONGOC_WARNING ("Failed to connect to: %s:%d, error: %d, %s\n",
                         ip,
                         host->port,
                         mongoc_socket_errno (sock),
                         errmsg);
         mongoc_socket_destroy (sock);
         sock = NULL;
         continue;
      }

      break;
   }

   if (!sock) {
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_CONNECT,
                      "Failed to connect to target host: %s",
                      host->host_and_port);
      freeaddrinfo (result);
      RETURN (NULL);
   }

   freeaddrinfo (result);

   RETURN (mongoc_stream_socket_new (sock));
}
const mongoc_host_list_t *
mongoc_uri_get_hosts (const mongoc_uri_t *uri)
{
   BSON_ASSERT (uri);

   return uri->hosts;
}
const bson_t *
mongoc_uri_get_credentials (const mongoc_uri_t *uri)
{
   BSON_ASSERT (uri);

   return &uri->credentials;
}
const bson_t *
mongoc_uri_get_read_prefs (const mongoc_uri_t *uri)
{
   BSON_ASSERT (uri);

   return mongoc_read_prefs_get_tags (uri->read_prefs);
}
const char *
mongoc_uri_get_string (const mongoc_uri_t *uri)
{
   BSON_ASSERT (uri);

   return uri->str;
}
const bson_t *
mongoc_uri_get_options (const mongoc_uri_t *uri)
{
   BSON_ASSERT (uri);

   return &uri->options;
}
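/* A small sketch of these accessors together: parse a URI and inspect its
 * database and parsed options document. Assumes stdio.h is included; the
 * URI string is illustrative. */
static void
example_inspect_uri (void)
{
   mongoc_uri_t *uri = mongoc_uri_new (
      "mongodb://user:pass@localhost:27017/mydb?connecttimeoutms=5000");
   const bson_t *options = mongoc_uri_get_options (uri);
   bson_iter_t iter;

   printf ("database: %s\n", mongoc_uri_get_database (uri));

   if (bson_iter_init_find_case (&iter, options, "connecttimeoutms")) {
      printf ("connect timeout: %d ms\n", bson_iter_int32 (&iter));
   }

   mongoc_uri_destroy (uri);
}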
void
bson_value_copy (const bson_value_t *src, /* IN */
                 bson_value_t *dst)       /* OUT */
{
   BSON_ASSERT (src);
   BSON_ASSERT (dst);

   dst->value_type = src->value_type;

   switch (src->value_type) {
   case BSON_TYPE_DOUBLE:
      dst->value.v_double = src->value.v_double;
      break;
   case BSON_TYPE_UTF8:
      dst->value.v_utf8.len = src->value.v_utf8.len;
      dst->value.v_utf8.str = bson_malloc (src->value.v_utf8.len + 1);
      memcpy (dst->value.v_utf8.str, src->value.v_utf8.str,
              dst->value.v_utf8.len);
      dst->value.v_utf8.str[dst->value.v_utf8.len] = '\0';
      break;
   case BSON_TYPE_DOCUMENT:
   case BSON_TYPE_ARRAY:
      dst->value.v_doc.data_len = src->value.v_doc.data_len;
      dst->value.v_doc.data = bson_malloc (src->value.v_doc.data_len);
      memcpy (dst->value.v_doc.data, src->value.v_doc.data,
              dst->value.v_doc.data_len);
      break;
   case BSON_TYPE_BINARY:
      dst->value.v_binary.subtype = src->value.v_binary.subtype;
      dst->value.v_binary.data_len = src->value.v_binary.data_len;
      dst->value.v_binary.data = bson_malloc (src->value.v_binary.data_len);
      memcpy (dst->value.v_binary.data, src->value.v_binary.data,
              dst->value.v_binary.data_len);
      break;
   case BSON_TYPE_OID:
      bson_oid_copy (&src->value.v_oid, &dst->value.v_oid);
      break;
   case BSON_TYPE_BOOL:
      dst->value.v_bool = src->value.v_bool;
      break;
   case BSON_TYPE_DATE_TIME:
      dst->value.v_datetime = src->value.v_datetime;
      break;
   case BSON_TYPE_REGEX:
      dst->value.v_regex.regex = bson_strdup (src->value.v_regex.regex);
      dst->value.v_regex.options = bson_strdup (src->value.v_regex.options);
      break;
   case BSON_TYPE_DBPOINTER:
      dst->value.v_dbpointer.collection_len =
         src->value.v_dbpointer.collection_len;
      dst->value.v_dbpointer.collection =
         bson_malloc (src->value.v_dbpointer.collection_len + 1);
      memcpy (dst->value.v_dbpointer.collection,
              src->value.v_dbpointer.collection,
              dst->value.v_dbpointer.collection_len);
      dst->value.v_dbpointer.collection[dst->value.v_dbpointer.collection_len] = '\0';
      bson_oid_copy (&src->value.v_dbpointer.oid, &dst->value.v_dbpointer.oid);
      break;
   case BSON_TYPE_CODE:
      dst->value.v_code.code_len = src->value.v_code.code_len;
      dst->value.v_code.code = bson_malloc (src->value.v_code.code_len + 1);
      memcpy (dst->value.v_code.code, src->value.v_code.code,
              dst->value.v_code.code_len);
      dst->value.v_code.code[dst->value.v_code.code_len] = '\0';
      break;
   case BSON_TYPE_SYMBOL:
      dst->value.v_symbol.len = src->value.v_symbol.len;
      dst->value.v_symbol.symbol = bson_malloc (src->value.v_symbol.len + 1);
      memcpy (dst->value.v_symbol.symbol, src->value.v_symbol.symbol,
              dst->value.v_symbol.len);
      dst->value.v_symbol.symbol[dst->value.v_symbol.len] = '\0';
      break;
   case BSON_TYPE_CODEWSCOPE:
      dst->value.v_codewscope.code_len = src->value.v_codewscope.code_len;
      dst->value.v_codewscope.code =
         bson_malloc (src->value.v_codewscope.code_len + 1);
      memcpy (dst->value.v_codewscope.code, src->value.v_codewscope.code,
              dst->value.v_codewscope.code_len);
      dst->value.v_codewscope.code[dst->value.v_codewscope.code_len] = '\0';
      dst->value.v_codewscope.scope_len = src->value.v_codewscope.scope_len;
      dst->value.v_codewscope.scope_data =
         bson_malloc (src->value.v_codewscope.scope_len);
      memcpy (dst->value.v_codewscope.scope_data,
              src->value.v_codewscope.scope_data,
              dst->value.v_codewscope.scope_len);
      break;
   case BSON_TYPE_INT32:
      dst->value.v_int32 = src->value.v_int32;
      break;
   case BSON_TYPE_TIMESTAMP:
      dst->value.v_timestamp.timestamp = src->value.v_timestamp.timestamp;
      dst->value.v_timestamp.increment = src->value.v_timestamp.increment;
      break;
   case BSON_TYPE_INT64:
      dst->value.v_int64 = src->value.v_int64;
      break;
   case BSON_TYPE_UNDEFINED:
   case BSON_TYPE_NULL:
   case BSON_TYPE_MAXKEY:
   case BSON_TYPE_MINKEY:
      break;
   case BSON_TYPE_EOD:
   default:
      BSON_ASSERT (false);
      return;
   }
}
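/* Usage sketch for bson_value_copy: deep-copy a value out of an iterator so
 * it outlives the document it came from. The field name is illustrative. */
static void
example_copy_value (const bson_t *doc)
{
   bson_iter_t iter;
   bson_value_t copy;

   if (bson_iter_init_find (&iter, doc, "field")) {
      bson_value_copy (bson_iter_value (&iter), &copy);
      /* ... use the copy after doc is destroyed ... */
      bson_value_destroy (&copy);
   }
}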
mongoc_async_cmd_result_t
_mongoc_async_cmd_phase_send (mongoc_async_cmd_t *acmd)
{
   size_t total_bytes = 0;
   size_t offset;
   ssize_t bytes;
   int i;
   /* if a continued write, then iovec will be set to a temporary copy */
   bool used_temp_iovec = false;
   mongoc_iovec_t *iovec = acmd->iovec;
   size_t niovec = acmd->niovec;

   for (i = 0; i < acmd->niovec; i++) {
      total_bytes += acmd->iovec[i].iov_len;
   }

   if (acmd->bytes_written > 0) {
      BSON_ASSERT (acmd->bytes_written < total_bytes);
      /* if bytes have been written before, compute the offset in the next
       * iovec entry to be written. */
      offset = acmd->bytes_written;

      /* subtract the lengths of all iovec entries written so far. */
      for (i = 0; i < acmd->niovec; i++) {
         if (offset < acmd->iovec[i].iov_len) {
            break;
         }
         offset -= acmd->iovec[i].iov_len;
      }

      BSON_ASSERT (i < acmd->niovec);

      /* create a new iovec with the remaining data to be written. */
      niovec = acmd->niovec - i;
      iovec = bson_malloc (niovec * sizeof (mongoc_iovec_t));
      memcpy (iovec, acmd->iovec + i, niovec * sizeof (mongoc_iovec_t));
      iovec[0].iov_base = (char *) iovec[0].iov_base + offset;
      iovec[0].iov_len -= offset;
      used_temp_iovec = true;
   }

   bytes = mongoc_stream_writev (acmd->stream, iovec, niovec, 0);

   if (used_temp_iovec) {
      bson_free (iovec);
   }

   if (bytes <= 0 && mongoc_stream_should_retry (acmd->stream)) {
      return MONGOC_ASYNC_CMD_IN_PROGRESS;
   }

   if (bytes < 0) {
      bson_set_error (&acmd->error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "Failed to write rpc bytes.");
      return MONGOC_ASYNC_CMD_ERROR;
   }

   acmd->bytes_written += bytes;

   if (acmd->bytes_written < total_bytes) {
      return MONGOC_ASYNC_CMD_IN_PROGRESS;
   }

   acmd->state = MONGOC_ASYNC_CMD_RECV_LEN;
   acmd->bytes_to_read = 4;
   acmd->events = POLLIN;

   acmd->cmd_started = bson_get_monotonic_time ();

   return MONGOC_ASYNC_CMD_IN_PROGRESS;
}
void
apply_read_preferences (const mongoc_read_prefs_t *read_prefs,
                        const mongoc_server_stream_t *server_stream,
                        const bson_t *query_bson,
                        mongoc_query_flags_t initial_flags,
                        mongoc_apply_read_prefs_result_t *result /* OUT */)
{
   mongoc_server_description_type_t server_type;

   ENTRY;

   BSON_ASSERT (server_stream);
   BSON_ASSERT (query_bson);
   BSON_ASSERT (result);

   /* default values */
   result->query_with_read_prefs = (bson_t *) query_bson;
   result->query_owned = false;
   result->flags = initial_flags;

   server_type = server_stream->sd->type;

   switch (server_stream->topology_type) {
   case MONGOC_TOPOLOGY_SINGLE:
      if (server_type == MONGOC_SERVER_MONGOS) {
         _apply_read_preferences_mongos (read_prefs, query_bson, result);
      } else {
         /* Server Selection Spec: for topology type single and server types
          * besides mongos, "clients MUST always set the slaveOK wire protocol
          * flag on reads to ensure that any server type can handle the
          * request." */
         result->flags |= MONGOC_QUERY_SLAVE_OK;
      }
      break;

   case MONGOC_TOPOLOGY_RS_NO_PRIMARY:
   case MONGOC_TOPOLOGY_RS_WITH_PRIMARY:
      /* Server Selection Spec: for RS topology types, "For all read
       * preferences modes except primary, clients MUST set the slaveOK wire
       * protocol flag to ensure that any suitable server can handle the
       * request. Clients MUST NOT set the slaveOK wire protocol flag if the
       * read preference mode is primary." */
      if (read_prefs && read_prefs->mode != MONGOC_READ_PRIMARY) {
         result->flags |= MONGOC_QUERY_SLAVE_OK;
      }
      break;

   case MONGOC_TOPOLOGY_SHARDED:
      _apply_read_preferences_mongos (read_prefs, query_bson, result);
      break;

   case MONGOC_TOPOLOGY_UNKNOWN:
   case MONGOC_TOPOLOGY_DESCRIPTION_TYPES:
   default:
      /* must not call _apply_read_preferences with unknown topology type */
      BSON_ASSERT (false);
   }

   EXIT;
}
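/* The public-API view of the logic above: a read preference passed to a find
 * is translated into the slaveOK flag or a $readPreference document as the
 * topology requires. A sketch, assuming a connected collection handle; the
 * empty query is illustrative. */
static void
example_find_from_secondary (mongoc_collection_t *coll)
{
   bson_t query = BSON_INITIALIZER;
   mongoc_read_prefs_t *prefs =
      mongoc_read_prefs_new (MONGOC_READ_SECONDARY_PREFERRED);
   mongoc_cursor_t *cursor;
   const bson_t *doc;

   cursor = mongoc_collection_find (coll, MONGOC_QUERY_NONE, 0, 0, 0,
                                    &query, NULL, prefs);
   while (mongoc_cursor_next (cursor, &doc)) {
      /* ... */
   }

   mongoc_cursor_destroy (cursor);
   mongoc_read_prefs_destroy (prefs);
   bson_destroy (&query);
}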
/*---------------------------------------------------------------------------
 *
 * _make_cursor --
 *
 *       Construct and send the aggregate command and create the resulting
 *       cursor. On error, stream->cursor remains NULL, otherwise it is
 *       created and must be destroyed.
 *
 * Return:
 *       False on error and sets stream->err.
 *
 *--------------------------------------------------------------------------
 */
static bool
_make_cursor (mongoc_change_stream_t *stream)
{
   mongoc_client_session_t *cs = NULL;
   bson_t command_opts;
   bson_t command; /* { aggregate: "coll", pipeline: [], ... } */
   bson_t reply;
   bson_t getmore_opts = BSON_INITIALIZER;
   bson_iter_t iter;
   mongoc_server_description_t *sd;
   uint32_t server_id;
   int32_t max_wire_version = -1;

   BSON_ASSERT (stream);
   BSON_ASSERT (!stream->cursor);

   _make_command (stream, &command);

   bson_copy_to (&(stream->opts.extra), &command_opts);
   sd = mongoc_client_select_server (
      stream->client, false /* for_writes */, stream->read_prefs, &stream->err);
   if (!sd) {
      goto cleanup;
   }

   server_id = mongoc_server_description_id (sd);
   bson_append_int32 (&command_opts, "serverId", 8, server_id);
   bson_append_int32 (&getmore_opts, "serverId", 8, server_id);
   max_wire_version = sd->max_wire_version;
   mongoc_server_description_destroy (sd);

   if (bson_iter_init_find (&iter, &command_opts, "sessionId")) {
      if (!_mongoc_client_session_from_iter (
             stream->client, &iter, &cs, &stream->err)) {
         goto cleanup;
      }
   } else if (stream->implicit_session) {
      /* If an implicit session was created before, and this cursor is now
       * being recreated after resuming, then use the same session as before. */
      cs = stream->implicit_session;
      if (!mongoc_client_session_append (cs, &command_opts, &stream->err)) {
         goto cleanup;
      }
   } else {
      /* Create an implicit session. This session lsid must be the same for the
       * agg command and the subsequent getMores. Thus, this implicit session
       * is passed as if it were an explicit session to
       * collection_read_command_with_opts and cursor_new_from_reply, but it is
       * still implicit and its lifetime is owned by this change_stream_t. */
      mongoc_session_opt_t *session_opts;

      session_opts = mongoc_session_opts_new ();
      mongoc_session_opts_set_causal_consistency (session_opts, false);
      /* returns NULL if sessions aren't supported. ignore errors. */
      cs = mongoc_client_start_session (stream->client, session_opts, NULL);
      stream->implicit_session = cs;
      mongoc_session_opts_destroy (session_opts);
      if (cs &&
          !mongoc_client_session_append (cs, &command_opts, &stream->err)) {
         goto cleanup;
      }
   }

   if (cs && !mongoc_client_session_append (cs, &getmore_opts, &stream->err)) {
      goto cleanup;
   }

   if (stream->read_concern && !bson_has_field (&command_opts, "readConcern")) {
      mongoc_read_concern_append (stream->read_concern, &command_opts);
   }

   /* even though serverId has already been set, still pass the read prefs.
    * they are necessary for OP_MSG if sending to a secondary. */
   if (!mongoc_client_read_command_with_opts (stream->client,
                                              stream->db,
                                              &command,
                                              stream->read_prefs,
                                              &command_opts,
                                              &reply,
                                              &stream->err)) {
      bson_destroy (&stream->err_doc);
      bson_copy_to (&reply, &stream->err_doc);
      bson_destroy (&reply);
      goto cleanup;
   }

   bson_append_bool (
      &getmore_opts, MONGOC_CURSOR_TAILABLE, MONGOC_CURSOR_TAILABLE_LEN, true);
   bson_append_bool (&getmore_opts,
                     MONGOC_CURSOR_AWAIT_DATA,
                     MONGOC_CURSOR_AWAIT_DATA_LEN,
                     true);

   /* maxTimeMS is only appended to getMores if these are set in cursor opts. */
   if (stream->max_await_time_ms > 0) {
      bson_append_int64 (&getmore_opts,
                         MONGOC_CURSOR_MAX_AWAIT_TIME_MS,
                         MONGOC_CURSOR_MAX_AWAIT_TIME_MS_LEN,
                         stream->max_await_time_ms);
   }

   if (stream->batch_size > 0) {
      bson_append_int32 (&getmore_opts,
                         MONGOC_CURSOR_BATCH_SIZE,
                         MONGOC_CURSOR_BATCH_SIZE_LEN,
                         stream->batch_size);
   }

   /* Change streams spec: "If neither startAtOperationTime nor resumeAfter are
    * specified, and the max wire version is >= 7, and the initial aggregate
    * command does not return a resumeToken (indicating no results), the
    * ChangeStream MUST save the operationTime from the initial aggregate
    * command when it returns." */
   if (bson_empty (&stream->resume_token) &&
       _mongoc_timestamp_empty (&stream->operation_time) &&
       max_wire_version >= 7 &&
       bson_iter_init_find (&iter, &reply, "operationTime")) {
      _mongoc_timestamp_set_from_bson (&stream->operation_time, &iter);
   }

   /* steals reply. */
   stream->cursor = mongoc_cursor_new_from_command_reply_with_opts (
      stream->client, &reply, &getmore_opts);

cleanup:
   bson_destroy (&command);
   bson_destroy (&command_opts);
   bson_destroy (&getmore_opts);
   return stream->err.code == 0;
}
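/* The public entry point that ends up in _make_cursor: a hedged sketch of
 * opening a change stream with the cursor options handled above (batchSize,
 * maxAwaitTimeMS). Assumes a connected collection handle; the option values
 * are illustrative. */
static mongoc_change_stream_t *
example_open_change_stream (mongoc_collection_t *coll)
{
   bson_t pipeline = BSON_INITIALIZER; /* empty user pipeline */
   bson_t opts = BSON_INITIALIZER;
   mongoc_change_stream_t *stream;

   BSON_APPEND_INT32 (&opts, "batchSize", 100);
   BSON_APPEND_INT64 (&opts, "maxAwaitTimeMS", 5000);

   stream = mongoc_collection_watch (coll, &pipeline, &opts);

   bson_destroy (&opts);
   bson_destroy (&pipeline);
   return stream;
}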
/* TODO: factor */
static void *
worker_thread (void *data)
{
   worker_closure_t *closure = (worker_closure_t *) data;
   mock_server_t *server = closure->server;
   mongoc_stream_t *client_stream = closure->client_stream;
   mongoc_buffer_t buffer;
   mongoc_rpc_t *rpc = NULL;
   bool handled;
   bson_error_t error;
   int32_t msg_len;
   bool stopped;
   sync_queue_t *q;
   request_t *request;
   mongoc_array_t autoresponders;
   ssize_t i;
   autoresponder_handle_t handle;

   ENTRY;

   BSON_ASSERT (closure);

   _mongoc_buffer_init (&buffer, NULL, 0, NULL, NULL);
   _mongoc_array_init (&autoresponders, sizeof (autoresponder_handle_t));

again:
   bson_free (rpc);
   rpc = NULL;
   handled = false;

   mongoc_mutex_lock (&server->mutex);
   stopped = server->stopped;
   mongoc_mutex_unlock (&server->mutex);

   if (stopped) {
      GOTO (failure);
   }

   if (_mongoc_buffer_fill (&buffer, client_stream, 4, TIMEOUT, &error) == -1) {
      GOTO (again);
   }

   assert (buffer.len >= 4);

   memcpy (&msg_len, buffer.data + buffer.off, 4);
   msg_len = BSON_UINT32_FROM_LE (msg_len);

   if (msg_len < 16) {
      MONGOC_WARNING ("No data");
      GOTO (failure);
   }

   if (_mongoc_buffer_fill (&buffer, client_stream, (size_t) msg_len, -1,
                            &error) == -1) {
      MONGOC_WARNING ("%s():%d: %s", BSON_FUNC, __LINE__, error.message);
      GOTO (failure);
   }

   assert (buffer.len >= (unsigned) msg_len);

   /* copies message from buffer */
   request = request_new (&buffer, msg_len, server, client_stream,
                          closure->port);

   mongoc_mutex_lock (&server->mutex);
   _mongoc_array_copy (&autoresponders, &server->autoresponders);
   mongoc_mutex_unlock (&server->mutex);

   if (mock_server_get_verbose (server)) {
      printf ("%5.2f %hu -> %hu %s\n",
              mock_server_get_uptime_sec (server),
              closure->port, server->port, request->as_str);
      fflush (stdout);
   }

   /* run responders most-recently-added-first */
   for (i = server->autoresponders.len - 1; i >= 0; i--) {
      handle = _mongoc_array_index (&server->autoresponders,
                                    autoresponder_handle_t, i);
      if (handle.responder (request, handle.data)) {
         handled = true;
         /* responder should destroy the request */
         request = NULL;
         break;
      }
   }

   if (!handled) {
      q = mock_server_get_queue (server);
      q_put (q, (void *) request);
      request = NULL;
   }

   memmove (buffer.data, buffer.data + buffer.off + msg_len,
            buffer.len - msg_len);
   buffer.off = 0;
   buffer.len -= msg_len;

   GOTO (again);

failure:
   _mongoc_array_destroy (&autoresponders);
   _mongoc_buffer_destroy (&buffer);

   mongoc_stream_close (client_stream);
   mongoc_stream_destroy (client_stream);
   bson_free (rpc);
   bson_free (closure);

   RETURN (NULL);
}
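/* How this thread is typically exercised from a test: a sketch using the
 * driver's test harness helpers (mock_server_with_autoismaster,
 * mock_server_run, mock_server_get_uri), assuming they are in scope; the
 * wire version literal is illustrative. mock_server_run spawns a
 * worker_thread per accepted connection. */
static void
example_mock_server_test (void)
{
   mock_server_t *server = mock_server_with_autoismaster (5);
   mongoc_client_t *client;

   mock_server_run (server);
   client = mongoc_client_new_from_uri (mock_server_get_uri (server));

   /* ... send commands; reply via autoresponders or the request queue ... */

   mongoc_client_destroy (client);
   mock_server_destroy (server);
}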
bool
mongoc_change_stream_next (mongoc_change_stream_t *stream, const bson_t **bson)
{
   bson_iter_t iter;
   bool ret = false;

   BSON_ASSERT (stream);
   BSON_ASSERT (bson);

   if (stream->err.code != 0) {
      goto end;
   }

   BSON_ASSERT (stream->cursor);
   if (!mongoc_cursor_next (stream->cursor, bson)) {
      const bson_t *err_doc;
      bson_error_t err;
      bool resumable = false;

      if (!mongoc_cursor_error_document (stream->cursor, &err, &err_doc)) {
         /* no error occurred, just no documents left. */
         goto end;
      }

      resumable = _is_resumable_error (err_doc);

      if (resumable) {
         /* recreate the cursor. */
         mongoc_cursor_destroy (stream->cursor);
         stream->cursor = NULL;
         if (!_make_cursor (stream)) {
            goto end;
         }
         if (!mongoc_cursor_next (stream->cursor, bson)) {
            resumable =
               !mongoc_cursor_error_document (stream->cursor, &err, &err_doc);
            if (resumable) {
               /* empty batch. */
               goto end;
            }
         }
      }

      if (!resumable) {
         stream->err = err;
         bson_destroy (&stream->err_doc);
         bson_copy_to (err_doc, &stream->err_doc);
         goto end;
      }
   }

   /* we have received documents, either from the first call to next or after
    * a resume. */
   if (!bson_iter_init_find (&iter, *bson, "_id")) {
      bson_set_error (&stream->err,
                      MONGOC_ERROR_CURSOR,
                      MONGOC_ERROR_CHANGE_STREAM_NO_RESUME_TOKEN,
                      "Cannot provide resume functionality when the resume "
                      "token is missing");
      goto end;
   }

   /* copy the resume token. */
   bson_reinit (&stream->resume_token);
   BSON_APPEND_VALUE (&stream->resume_token, "resumeAfter",
                      bson_iter_value (&iter));
   /* clear out the operation time, since we no longer need it to resume. */
   _mongoc_timestamp_clear (&stream->operation_time);
   ret = true;

end:
   /* Driver Sessions Spec: "When an implicit session is associated with a
    * cursor for use with getMore operations, the session MUST be returned to
    * the pool immediately following a getMore operation that indicates that
    * the cursor has been exhausted." */
   if (stream->implicit_session) {
      /* if creating the change stream cursor errored, it may be null. */
      if (!stream->cursor || stream->cursor->cursor_id == 0) {
         mongoc_client_session_destroy (stream->implicit_session);
         stream->implicit_session = NULL;
      }
   }
   return ret;
}
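/* Consuming loop for mongoc_change_stream_next: iterate until it returns
 * false, then distinguish "no more events" from a real error via
 * mongoc_change_stream_error_document. A sketch assuming a valid stream and
 * stdio.h. */
static void
example_drain_change_stream (mongoc_change_stream_t *stream)
{
   const bson_t *doc;
   const bson_t *err_doc;
   bson_error_t error;

   while (mongoc_change_stream_next (stream, &doc)) {
      char *json = bson_as_json (doc, NULL);
      printf ("change: %s\n", json);
      bson_free (json);
   }

   if (mongoc_change_stream_error_document (stream, &error, &err_doc)) {
      fprintf (stderr, "change stream error: %s\n", error.message);
   }

   mongoc_change_stream_destroy (stream);
}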
const mongoc_read_prefs_t *
mongoc_transaction_opts_get_read_prefs (const mongoc_transaction_opt_t *opts)
{
   BSON_ASSERT (opts);

   return opts->read_prefs;
}
/* construct the aggregate command in cmd. looks like one of the following:
 * for a collection change stream:
 *   { aggregate: collname, pipeline: [], cursor: { batchSize: x } }
 * for a database change stream:
 *   { aggregate: 1, pipeline: [], cursor: { batchSize: x } }
 * for a client change stream:
 *   { aggregate: 1, pipeline: [{$changeStream: {allChangesForCluster: true}}],
 *     cursor: { batchSize: x } }
 */
static void
_make_command (mongoc_change_stream_t *stream, bson_t *command)
{
   bson_iter_t iter;
   bson_t change_stream_stage; /* { $changeStream: <change_stream_doc> } */
   bson_t change_stream_doc;
   bson_t pipeline;
   bson_t cursor_doc;

   bson_init (command);
   if (stream->change_stream_type == MONGOC_CHANGE_STREAM_COLLECTION) {
      bson_append_utf8 (
         command, "aggregate", 9, stream->coll, (int) strlen (stream->coll));
   } else {
      bson_append_int32 (command, "aggregate", 9, 1);
   }

   bson_append_array_begin (command, "pipeline", 8, &pipeline);

   /* append the $changeStream stage. */
   bson_append_document_begin (&pipeline, "0", 1, &change_stream_stage);
   bson_append_document_begin (
      &change_stream_stage, "$changeStream", 13, &change_stream_doc);
   bson_concat (&change_stream_doc, stream->full_document);
   if (!bson_empty (&stream->resume_token)) {
      bson_concat (&change_stream_doc, &stream->resume_token);
   }

   /* Change streams spec: "startAtOperationTime and resumeAfter are mutually
    * exclusive; if both startAtOperationTime and resumeAfter are set, the
    * server will return an error. Drivers MUST NOT throw a custom error, and
    * MUST defer to the server error." */
   if (!_mongoc_timestamp_empty (&stream->operation_time)) {
      _mongoc_timestamp_append (
         &stream->operation_time, &change_stream_doc, "startAtOperationTime");
   }

   if (stream->change_stream_type == MONGOC_CHANGE_STREAM_CLIENT) {
      bson_append_bool (&change_stream_doc, "allChangesForCluster", 20, true);
   }
   bson_append_document_end (&change_stream_stage, &change_stream_doc);
   bson_append_document_end (&pipeline, &change_stream_stage);

   /* Append user pipeline if it exists */
   if (bson_iter_init_find (&iter, &stream->pipeline_to_append, "pipeline") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      bson_iter_t child_iter;
      uint32_t key_int = 1;
      char buf[16];
      const char *key_str;

      BSON_ASSERT (bson_iter_recurse (&iter, &child_iter));
      while (bson_iter_next (&child_iter)) {
         /* the user pipeline may consist of invalid stages or non-documents.
          * append anyway, and rely on the server error. */
         size_t keyLen =
            bson_uint32_to_string (key_int, &key_str, buf, sizeof (buf));
         bson_append_value (
            &pipeline, key_str, (int) keyLen, bson_iter_value (&child_iter));
         ++key_int;
      }
   }

   bson_append_array_end (command, &pipeline);

   /* Add batch size if needed */
   bson_append_document_begin (command, "cursor", 6, &cursor_doc);
   if (stream->batch_size > 0) {
      bson_append_int32 (&cursor_doc, "batchSize", 9, stream->batch_size);
   }
   bson_append_document_end (command, &cursor_doc);
}
/**
 * mongoc_database_add_user:
 * @database: A #mongoc_database_t.
 * @username: A string containing the username.
 * @password: (allow-none): A string containing password, or NULL.
 * @roles: (allow-none): An optional bson_t of roles.
 * @custom_data: (allow-none): An optional bson_t of data to store.
 * @error: (out) (allow-none): A location for a bson_error_t or %NULL.
 *
 * Creates a new user with access to @database.
 *
 * Returns: true if the user was created, otherwise false and @error is set.
 */
bool
mongoc_database_add_user (mongoc_database_t *database,
                          const char *username,
                          const char *password,
                          const bson_t *roles,
                          const bson_t *custom_data,
                          bson_error_t *error)
{
   bson_error_t lerror;
   bson_t cmd;
   bson_t ar;
   char *input;
   char *hashed_password;
   bool ret = false;

   ENTRY;

   BSON_ASSERT (database);
   BSON_ASSERT (username);

   /*
    * CDRIVER-232:
    *
    * Perform a (slow and tedious) round trip to mongod to determine if
    * we can safely call createUser. Otherwise, we will fallback and
    * perform legacy insertion into users collection.
    */
   bson_init (&cmd);
   BSON_APPEND_UTF8 (&cmd, "usersInfo", username);
   ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, &lerror);
   bson_destroy (&cmd);

   if (!ret && (lerror.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND)) {
      ret = mongoc_database_add_user_legacy (database, username, password,
                                             error);
   } else if (ret || (lerror.code == 13)) {
      /* usersInfo succeeded or failed with auth err, we're on modern mongod */
      input = bson_strdup_printf ("%s:mongo:%s", username, password);
      hashed_password = _mongoc_hex_md5 (input);
      bson_free (input);

      bson_init (&cmd);
      BSON_APPEND_UTF8 (&cmd, "createUser", username);
      BSON_APPEND_UTF8 (&cmd, "pwd", hashed_password);
      BSON_APPEND_BOOL (&cmd, "digestPassword", false);
      if (custom_data) {
         BSON_APPEND_DOCUMENT (&cmd, "customData", custom_data);
      }
      if (roles) {
         BSON_APPEND_ARRAY (&cmd, "roles", roles);
      } else {
         bson_append_array_begin (&cmd, "roles", 5, &ar);
         bson_append_array_end (&cmd, &ar);
      }

      ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, error);

      bson_free (hashed_password);
      bson_destroy (&cmd);
   } else if (error) {
      memcpy (error, &lerror, sizeof *error);
   }

   RETURN (ret);
}
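/* Usage sketch for mongoc_database_add_user: grant a single role. BSON
 * arrays are keyed "0", "1", ...; the user, password, and role names are
 * illustrative, and stdio.h is assumed. */
static void
example_add_user (mongoc_database_t *db)
{
   bson_t roles = BSON_INITIALIZER;
   bson_error_t error;

   BSON_APPEND_UTF8 (&roles, "0", "readWrite");

   if (!mongoc_database_add_user (db, "alice", "secret", &roles, NULL,
                                  &error)) {
      fprintf (stderr, "add_user failed: %s\n", error.message);
   }

   bson_destroy (&roles);
}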
/**
 * _mongoc_gridfs_file_new_from_bson:
 *
 * creates a gridfs file from a bson object
 *
 * This is only really useful for instantiating a gridfs file from a server
 * side object
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs, const bson_t *data)
{
   mongoc_gridfs_file_t *file;
   const bson_value_t *value;
   const char *key;
   bson_iter_t iter;
   const uint8_t *buf;
   uint32_t buf_len;

   ENTRY;

   BSON_ASSERT (gridfs);
   BSON_ASSERT (data);

   file = bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   bson_copy_to (data, &file->bson);

   bson_iter_init (&iter, &file->bson);

   while (bson_iter_next (&iter)) {
      key = bson_iter_key (&iter);

      if (0 == strcmp (key, "_id")) {
         value = bson_iter_value (&iter);
         bson_value_copy (value, &file->files_id);
      } else if (0 == strcmp (key, "length")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         file->length = bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "chunkSize")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         if (bson_iter_as_int64 (&iter) > INT32_MAX) {
            GOTO (failure);
         }
         file->chunk_size = (int32_t) bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "uploadDate")) {
         if (!BSON_ITER_HOLDS_DATE_TIME (&iter)) {
            GOTO (failure);
         }
         file->upload_date = bson_iter_date_time (&iter);
      } else if (0 == strcmp (key, "md5")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_md5 = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "filename")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_filename = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "contentType")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_content_type = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "aliases")) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter)) {
            GOTO (failure);
         }
         bson_iter_array (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_aliases, buf, buf_len);
      } else if (0 == strcmp (key, "metadata")) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            GOTO (failure);
         }
         bson_iter_document (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_metadata, buf, buf_len);
      }
   }

   /* TODO: is there a minimal object we should be verifying that we
    * actually have here? */

   RETURN (file);

failure:
   bson_destroy (&file->bson);
   bson_free (file); /* don't leak the file struct itself on invalid input */

   RETURN (NULL);
}
/* Uses old way of querying system.namespaces. */
mongoc_cursor_t *
_mongoc_database_find_collections_legacy (mongoc_database_t *database,
                                          const bson_t *filter,
                                          bson_error_t *error)
{
   mongoc_collection_t *col;
   mongoc_cursor_t *cursor = NULL;
   mongoc_read_prefs_t *read_prefs;
   uint32_t dbname_len;
   bson_t legacy_filter;
   bson_iter_t iter;
   const char *col_filter;
   bson_t q = BSON_INITIALIZER;
   mongoc_database_find_collections_legacy_ctx_t *ctx;

   BSON_ASSERT (database);

   col = mongoc_client_get_collection (database->client,
                                       database->name,
                                       "system.namespaces");

   BSON_ASSERT (col);

   dbname_len = (uint32_t) strlen (database->name);

   ctx = (mongoc_database_find_collections_legacy_ctx_t *)
      bson_malloc (sizeof (*ctx));

   ctx->dbname = database->name;
   ctx->dbname_len = dbname_len;

   /* Filtering on name needs to be handled differently for old servers. */
   if (filter && bson_iter_init_find (&iter, filter, "name")) {
      bson_string_t *buf;

      /* on legacy servers, this must be a string (i.e. not a regex) */
      if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
         bson_set_error (error,
                         MONGOC_ERROR_NAMESPACE,
                         MONGOC_ERROR_NAMESPACE_INVALID_FILTER_TYPE,
                         "On legacy servers, a filter on name can only be a string.");
         bson_free (ctx);
         goto cleanup_filter;
      }

      BSON_ASSERT (BSON_ITER_HOLDS_UTF8 (&iter));
      col_filter = bson_iter_utf8 (&iter, NULL);
      bson_init (&legacy_filter);
      bson_copy_to_excluding_noinit (filter, &legacy_filter, "name", NULL);

      /* We must db-qualify filters on name. */
      buf = bson_string_new (database->name);
      bson_string_append_c (buf, '.');
      bson_string_append (buf, col_filter);
      BSON_APPEND_UTF8 (&legacy_filter, "name", buf->str);
      bson_string_free (buf, true);

      filter = &legacy_filter;
   }

   read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);

   cursor = mongoc_collection_find (col, MONGOC_QUERY_NONE, 0, 0, 0,
                                    filter ? filter : &q, NULL, read_prefs);

   _mongoc_cursor_transform_init (
      cursor,
      _mongoc_database_find_collections_legacy_filter,
      _mongoc_database_find_collections_legacy_mutate,
      &bson_free,
      ctx);

   mongoc_read_prefs_destroy (read_prefs);

cleanup_filter:
   mongoc_collection_destroy (col);
   return cursor;
}
/** refresh a gridfs file's underlying page
 *
 * This unconditionally fetches the current page, even if the current page
 * covers the same theoretical chunk.
 */
static bool
_mongoc_gridfs_file_refresh_page (mongoc_gridfs_file_t *file)
{
   bson_t *query, *fields, child, child2;
   const bson_t *chunk;
   const char *key;
   bson_iter_t iter;
   uint32_t n;
   const uint8_t *data;
   uint32_t len;

   ENTRY;

   BSON_ASSERT (file);

   n = (uint32_t) (file->pos / file->chunk_size);

   if (file->page) {
      _mongoc_gridfs_file_page_destroy (file->page);
      file->page = NULL;
   }

   /* if the file pointer is past the end of the current file (i.e. pointing
    * to a new chunk) and we're on a chunk boundary, we'll pass the page
    * constructor a new empty page */
   if ((int64_t) file->pos >= file->length && !(file->pos % file->chunk_size)) {
      data = (uint8_t *) "";
      len = 0;
   } else {
      /* if we have a cursor, but the cursor doesn't have the chunk we're
       * going to need, destroy it (we'll grab a new one immediately there
       * after) */
      if (file->cursor &&
          !(file->cursor_range[0] <= n && file->cursor_range[1] >= n)) {
         mongoc_cursor_destroy (file->cursor);
         file->cursor = NULL;
      }

      if (!file->cursor) {
         query = bson_new ();

         bson_append_document_begin (query, "$query", -1, &child);
         bson_append_value (&child, "files_id", -1, &file->files_id);

         bson_append_document_begin (&child, "n", -1, &child2);
         bson_append_int32 (&child2, "$gte", -1,
                            (int32_t) (file->pos / file->chunk_size));
         bson_append_document_end (&child, &child2);
         bson_append_document_end (query, &child);

         bson_append_document_begin (query, "$orderby", -1, &child);
         bson_append_int32 (&child, "n", -1, 1);
         bson_append_document_end (query, &child);

         fields = bson_new ();
         bson_append_int32 (fields, "n", -1, 1);
         bson_append_int32 (fields, "data", -1, 1);
         bson_append_int32 (fields, "_id", -1, 0);

         /* find all chunks greater than or equal to our current file pos */
         file->cursor = mongoc_collection_find (file->gridfs->chunks,
                                                MONGOC_QUERY_NONE, 0, 0, 0,
                                                query, fields, NULL);

         file->cursor_range[0] = n;
         file->cursor_range[1] = (uint32_t) (file->length / file->chunk_size);

         bson_destroy (query);
         bson_destroy (fields);

         BSON_ASSERT (file->cursor);
      }

      /* we might have had a cursor before, then seeked ahead past a chunk.
       * iterate until we're on the right chunk */
      while (file->cursor_range[0] <= n) {
         if (!mongoc_cursor_next (file->cursor, &chunk)) {
            if (file->cursor->failed) {
               memcpy (&(file->error), &(file->cursor->error),
                       sizeof (bson_error_t));
               file->failed = true;
            }

            RETURN (0);
         }

         file->cursor_range[0]++;
      }

      bson_iter_init (&iter, chunk);

      /* grab out what we need from the chunk */
      while (bson_iter_next (&iter)) {
         key = bson_iter_key (&iter);

         if (strcmp (key, "n") == 0) {
            n = bson_iter_int32 (&iter);
         } else if (strcmp (key, "data") == 0) {
            bson_iter_binary (&iter, NULL, &len, &data);
         } else {
            RETURN (0);
         }
      }

      /* we're on the wrong chunk somehow... probably because our gridfs is
       * missing chunks.
       *
       * TODO: maybe we should make more noise here?
       */
      if (!(n == file->pos / file->chunk_size)) {
         RETURN (0);
      }
   }

   file->page = _mongoc_gridfs_file_page_new (data, len, file->chunk_size);

   /* seek in the page towards wherever we're supposed to be */
   RETURN (_mongoc_gridfs_file_page_seek (file->page,
                                          file->pos % file->chunk_size));
}
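/* The chunk paging above is driven by the public read API. A sketch,
 * assuming an existing GridFS handle, a stored file named "photo.bin", and
 * stdio.h; each readv that crosses a chunk boundary triggers
 * _mongoc_gridfs_file_refresh_page. */
static void
example_read_gridfs_file (mongoc_gridfs_t *gridfs)
{
   bson_error_t error;
   mongoc_gridfs_file_t *file =
      mongoc_gridfs_find_one_by_filename (gridfs, "photo.bin", &error);
   char buf[4096];
   mongoc_iovec_t iov;
   ssize_t nread;

   if (!file) {
      fprintf (stderr, "not found: %s\n", error.message);
      return;
   }

   iov.iov_base = buf;
   iov.iov_len = sizeof buf;

   while ((nread = mongoc_gridfs_file_readv (file, &iov, 1, 1, 0)) > 0) {
      /* ... consume nread bytes from buf ... */
   }

   mongoc_gridfs_file_destroy (file);
}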
bool
_mongoc_client_command_simple_with_hint (mongoc_client_t *client,
                                         const char *db_name,
                                         const bson_t *command,
                                         const mongoc_read_prefs_t *read_prefs,
                                         bool is_write_command,
                                         bson_t *reply,
                                         uint32_t hint,
                                         bson_error_t *error)
{
   mongoc_cluster_t *cluster;
   mongoc_stream_t *stream;
   mongoc_server_description_t *sd;
   mongoc_ss_optype_t optype;
   bool reply_initialized = false;
   bool ret = false;

   BSON_ASSERT (client);
   BSON_ASSERT (db_name);
   BSON_ASSERT (command);

   cluster = &client->cluster;

   if (!hint) {
      optype = is_write_command ? MONGOC_SS_WRITE : MONGOC_SS_READ;
      sd = mongoc_cluster_select_by_optype (cluster, optype, read_prefs,
                                            error);
      if (!sd) {
         GOTO (done);
      }

      hint = sd->id;
      mongoc_server_description_destroy (sd);
   }

   stream = mongoc_cluster_fetch_stream (cluster, hint,
                                         true /* reconnect_ok */, error);
   if (!stream) {
      GOTO (done);
   }

   ret = mongoc_cluster_run_command_with_read_preference (cluster,
                                                          stream,
                                                          db_name,
                                                          command,
                                                          hint,
                                                          read_prefs,
                                                          is_write_command,
                                                          reply,
                                                          error);
   reply_initialized = true;

done:
   if (!reply_initialized && reply) {
      bson_init (reply);
   }

   return ret;
}
void
mock_server_reply_multi (request_t *request,
                         mongoc_reply_flags_t flags,
                         const bson_t *docs,
                         int n_docs,
                         int64_t cursor_id)
{
   const mongoc_rpc_t *request_rpc;
   mock_server_t *server;
   mongoc_stream_t *client;
   char *doc_json;
   bson_string_t *docs_json;
   mongoc_iovec_t *iov;
   mongoc_array_t ar;
   mongoc_rpc_t r = {{0}};
   size_t expected = 0;
   ssize_t n_written;
   int iovcnt;
   int i;
   uint8_t *buf;
   uint8_t *ptr;
   size_t len;

   BSON_ASSERT (request);
   BSON_ASSERT (docs);

   request_rpc = &request->request_rpc;
   server = request->server;
   client = request->client;

   docs_json = bson_string_new ("");
   for (i = 0; i < n_docs; i++) {
      doc_json = bson_as_json (&docs[i], NULL);
      bson_string_append (docs_json, doc_json);
      bson_free (doc_json);
      if (i < n_docs - 1) {
         bson_string_append (docs_json, ", ");
      }
   }

   if (mock_server_get_verbose (request->server)) {
      printf ("%5.2f %hu <- %hu \t%s\n",
              mock_server_get_uptime_sec (request->server),
              request->client_port,
              mock_server_get_port (request->server),
              docs_json->str);
      fflush (stdout);
   }

   len = 0;

   for (i = 0; i < n_docs; i++) {
      len += docs[i].len;
   }

   ptr = buf = bson_malloc (len);

   for (i = 0; i < n_docs; i++) {
      memcpy (ptr, bson_get_data (&docs[i]), docs[i].len);
      ptr += docs[i].len;
   }

   _mongoc_array_init (&ar, sizeof (mongoc_iovec_t));

   mongoc_mutex_lock (&server->mutex);

   if (!(request->opcode == MONGOC_OPCODE_QUERY &&
         request_rpc->query.flags & MONGOC_QUERY_EXHAUST)) {
      server->last_response_id++;
   }

   r.reply.request_id = server->last_response_id;
   mongoc_mutex_unlock (&server->mutex);
   r.reply.msg_len = 0;
   r.reply.response_to = request_rpc->header.request_id;
   r.reply.opcode = MONGOC_OPCODE_REPLY;
   r.reply.flags = flags;
   r.reply.cursor_id = cursor_id;
   r.reply.start_from = 0;
   r.reply.n_returned = 1;
   r.reply.documents = buf;
   r.reply.documents_len = (uint32_t) len;

   _mongoc_rpc_gather (&r, &ar);
   _mongoc_rpc_swab_to_le (&r);

   iov = (mongoc_iovec_t *) ar.data;
   iovcnt = (int) ar.len;

   for (i = 0; i < iovcnt; i++) {
      expected += iov[i].iov_len;
   }

   n_written = mongoc_stream_writev (client, iov, (size_t) iovcnt, -1);

   assert (n_written == expected);

   bson_string_free (docs_json, true);
   _mongoc_array_destroy (&ar);
   bson_free (buf);
}
bool
_mongoc_client_recv_gle (mongoc_client_t *client,
                         uint32_t server_id,
                         bson_t **gle_doc,
                         bson_error_t *error)
{
   mongoc_buffer_t buffer;
   mongoc_rpc_t rpc;
   bson_iter_t iter;
   bool ret = false;
   bson_t b;

   ENTRY;

   BSON_ASSERT (client);
   BSON_ASSERT (server_id);

   if (gle_doc) {
      *gle_doc = NULL;
   }

   _mongoc_buffer_init (&buffer, NULL, 0, NULL, NULL);

   if (!mongoc_cluster_try_recv (&client->cluster, &rpc, &buffer,
                                 server_id, error)) {
      mongoc_topology_invalidate_server (client->topology, server_id);
      GOTO (cleanup);
   }

   if (rpc.header.opcode != MONGOC_OPCODE_REPLY) {
      bson_set_error (error,
                      MONGOC_ERROR_PROTOCOL,
                      MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                      "Received message other than OP_REPLY.");
      GOTO (cleanup);
   }

   if (_mongoc_rpc_reply_get_first (&rpc.reply, &b)) {
      if ((rpc.reply.flags & MONGOC_REPLY_QUERY_FAILURE)) {
         _bson_to_error (&b, error);
         bson_destroy (&b);
         GOTO (cleanup);
      }

      if (gle_doc) {
         *gle_doc = bson_copy (&b);
      }

      /* only inspect "ok" if it is present and a double; the original
       * short-circuit read the iterator even when the field was missing */
      if (bson_iter_init_find (&iter, &b, "ok") &&
          BSON_ITER_HOLDS_DOUBLE (&iter)) {
         if (bson_iter_double (&iter) == 0.0) {
            _bson_to_error (&b, error);
         }
      }

      bson_destroy (&b);

      ret = true;
   }

cleanup:
   _mongoc_buffer_destroy (&buffer);

   RETURN (ret);
}
static ssize_t
mongoc_stream_unix_writev (mongoc_stream_t *stream,
                           struct iovec *iov,
                           size_t iovcnt,
                           bson_uint32_t timeout_msec)
{
   mongoc_stream_unix_t *file = (mongoc_stream_unix_t *) stream;
   struct msghdr msg;
   struct pollfd fds;
   bson_int64_t now;
   bson_int64_t expire;
   size_t cur = 0;
   ssize_t written;
   ssize_t ret = 0;
   int flags = 0;
   int timeout;

   bson_return_val_if_fail (stream, -1);
   bson_return_val_if_fail (iov, -1);
   bson_return_val_if_fail (iovcnt, -1);

   /*
    * NOTE: See notes from mongoc_stream_unix_readv(); they apply here too.
    */

   if (file->fd == -1) {
      errno = EBADF;
      return -1;
   }

   if (!timeout_msec) {
      timeout_msec = MONGOC_DEFAULT_TIMEOUT_MSEC;
   }

   /*
    * We require a monotonic clock for determining our timeout interval. This
    * is so that we are resilient to changes in the underlying wall clock,
    * such as during timezone changes. The monotonic clock is in microseconds
    * since an unknown epoch (but often system startup).
    */
   expire = bson_get_monotonic_time () + (timeout_msec * 1000UL);

   /*
    * Prepare our pollfd so that we are notified of errors or peer hang-up
    * while waiting for the socket to become writable.
    */
   fds.fd = file->fd;
   fds.events = (POLLOUT | POLLERR | POLLHUP);

   for (;;) {
      /*
       * Build our message for sendmsg() taking into account that we may have
       * already done a short write and must increment the iovec.
       */
      msg.msg_name = NULL;
      msg.msg_namelen = 0;
      msg.msg_iov = iov + cur;
      msg.msg_iovlen = iovcnt - cur;
      msg.msg_control = NULL;
      msg.msg_controllen = 0;
      msg.msg_flags = 0;

      BSON_ASSERT (msg.msg_iov->iov_len);
      BSON_ASSERT (cur < iovcnt);

      fds.revents = 0;

      /*
       * Determine number of milliseconds until timeout expires.
       */
      now = bson_get_monotonic_time ();
      timeout = MAX (0, (expire - now) / 1000UL);

      /*
       * Block on poll() until the socket is writable or we time out. Upon
       * timeout, synthesize an errno of ETIMEDOUT.
       */
      errno = 0;
      fds.revents = 0;
      written = poll (&fds, 1, timeout);
      if (written == -1) {
         return -1;
      } else if (written == 0) {
         errno = ETIMEDOUT;
         return -1;
      }

      /*
       * Perform sendmsg() on socket to send next chunk of data. If it turns
       * out this is not a socket, fall back to writev(). This should only
       * happen during unit tests.
       */
      errno = 0;
      written = TEMP_FAILURE_RETRY (sendmsg (file->fd, &msg, flags));
      if (written == -1 && errno == ENOTSOCK) {
         written = TEMP_FAILURE_RETRY (writev (file->fd, iov + cur,
                                               iovcnt - cur));
         if (!written) {
            return ret;
         }
      }

      /*
       * If our sendmsg() failed, we can't do much now, can we?
       */
      if (written == -1) {
         return written;
      } else {
         ret += written;
      }

      BSON_ASSERT (cur < iovcnt);

      /*
       * Increment iovecs in the case we got a short write. Break out if
       * we have written all our expected data.
       */
      while ((cur < iovcnt) && (written >= iov[cur].iov_len)) {
         BSON_ASSERT (iov[cur].iov_len);
         written -= iov[cur++].iov_len;
         BSON_ASSERT (cur <= iovcnt);
      }

      if (cur == iovcnt) {
         break;
      }

      iov[cur].iov_base = ((bson_uint8_t *) iov[cur].iov_base) + written;
      iov[cur].iov_len -= written;

      BSON_ASSERT (iovcnt - cur);
      BSON_ASSERT (iov[cur].iov_len);
   }

   return ret;
}
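/* The poll-deadline arithmetic used above, in isolation: convert a relative
 * millisecond timeout into an absolute monotonic deadline, then recompute
 * the remaining milliseconds before each poll(). A sketch of the pattern;
 * the helper name is hypothetical. */
static int
example_remaining_ms (bson_int64_t expire /* microseconds, monotonic */)
{
   bson_int64_t now = bson_get_monotonic_time ();

   /* clamp at zero so poll() returns immediately once the deadline passes */
   return (int) MAX (0, (expire - now) / 1000L);
}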