static void
ha_replica_set_configure (ha_replica_set_t *replica_set,
                          ha_node_t *primary)
{
   mongoc_database_t *database;
   mongoc_client_t *client;
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bson_error_t error;
   bson_iter_t iter;
   ha_node_t *node;
   bson_t ar;
   bson_t cmd;
   bson_t config;
   bson_t member;
   char *str;
   char *uristr;
   char hoststr[32];
   char key[8];
   int i = 0;

   uristr = bson_strdup_printf ("mongodb://127.0.0.1:%hu/", primary->port);
   client = mongoc_client_new (uristr);
#ifdef MONGOC_ENABLE_SSL
   if (replica_set->ssl_opt) {
      mongoc_client_set_ssl_opts (client, replica_set->ssl_opt);
   }
#endif
   bson_free (uristr);

   bson_init (&cmd);
   bson_append_document_begin (&cmd, "replSetInitiate", -1, &config);
   bson_append_utf8 (&config, "_id", 3, replica_set->name, -1);
   bson_append_array_begin (&config, "members", -1, &ar);
   for (node = replica_set->nodes; node; node = node->next) {
      snprintf (key, sizeof key, "%u", i);
      key[sizeof key - 1] = '\0';

      snprintf (hoststr, sizeof hoststr, "127.0.0.1:%hu", node->port);
      hoststr[sizeof hoststr - 1] = '\0';

      bson_append_document_begin (&ar, key, -1, &member);
      bson_append_int32 (&member, "_id", -1, i);
      bson_append_utf8 (&member, "host", -1, hoststr, -1);
      bson_append_bool (&member, "arbiterOnly", -1, node->is_arbiter);
      bson_append_document_end (&ar, &member);

      i++;
   }
   bson_append_array_end (&config, &ar);
   bson_append_document_end (&cmd, &config);

   str = bson_as_json (&cmd, NULL);
   MONGOC_DEBUG ("Config: %s", str);
   bson_free (str);

   database = mongoc_client_get_database (client, "admin");

again:
   cursor = mongoc_database_command (
      database, MONGOC_QUERY_NONE, 0, 1, &cmd, NULL, NULL);

   while (mongoc_cursor_next (cursor, &doc)) {
      str = bson_as_json (doc, NULL);
      MONGOC_DEBUG ("Reply: %s", str);
      bson_free (str);
      if (bson_iter_init_find (&iter, doc, "ok") &&
          bson_iter_as_bool (&iter)) {
         goto cleanup;
      }
   }

   if (mongoc_cursor_error (cursor, &error)) {
      mongoc_cursor_destroy (cursor);
      MONGOC_WARNING ("%s: Retrying in 1 second.", error.message);
      sleep (1);
      goto again;
   }

cleanup:
   mongoc_cursor_destroy (cursor);
   mongoc_database_destroy (database);
   mongoc_client_destroy (client);
   bson_destroy (&cmd);
}
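/*
 * A minimal sketch (not part of the harness above) of the replSetInitiate
 * document that ha_replica_set_configure() assembles by hand, shown with
 * libbson's BCON macros for readability. The set name, port, and single
 * member are illustrative placeholders.
 */
static bson_t *
example_replset_initiate_cmd (void)
{
   return BCON_NEW ("replSetInitiate", "{",
                       "_id", BCON_UTF8 ("repltest"),
                       "members", "[",
                          "{",
                             "_id", BCON_INT32 (0),
                             "host", BCON_UTF8 ("127.0.0.1:27017"),
                             "arbiterOnly", BCON_BOOL (false),
                          "}",
                       "]",
                    "}");
}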
static mongoc_stream_t *
mongoc_client_default_stream_initiator (const mongoc_uri_t *uri,
                                        const mongoc_host_list_t *host,
                                        void *user_data,
                                        bson_error_t *error)
{
   mongoc_stream_t *base_stream = NULL;
#ifdef MONGOC_ENABLE_SSL
   mongoc_client_t *client = user_data;
   const bson_t *options;
   bson_iter_t iter;
   const char *mechanism;
#endif

   bson_return_val_if_fail (uri, NULL);
   bson_return_val_if_fail (host, NULL);

#ifndef MONGOC_ENABLE_SSL
   if (mongoc_uri_get_ssl (uri)) {
      bson_set_error (error,
                      MONGOC_ERROR_CLIENT,
                      MONGOC_ERROR_CLIENT_NO_ACCEPTABLE_PEER,
                      "SSL is not enabled in this build of mongo-c-driver.");
      return NULL;
   }
#endif

   switch (host->family) {
#if defined(AF_INET6)
   case AF_INET6:
#endif
   case AF_INET:
      base_stream = mongoc_client_connect_tcp (uri, host, error);
      break;
   case AF_UNIX:
      base_stream = mongoc_client_connect_unix (uri, host, error);
      break;
   default:
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_INVALID_TYPE,
                      "Invalid address family: 0x%02x",
                      host->family);
      break;
   }

#ifdef MONGOC_ENABLE_SSL
   if (base_stream) {
      options = mongoc_uri_get_options (uri);
      mechanism = mongoc_uri_get_auth_mechanism (uri);

      if ((bson_iter_init_find_case (&iter, options, "ssl") &&
           bson_iter_as_bool (&iter)) ||
          (mechanism && (0 == strcmp (mechanism, "MONGODB-X509")))) {
         base_stream =
            mongoc_stream_tls_new (base_stream, &client->ssl_opts, true);

         if (!base_stream) {
            bson_set_error (error,
                            MONGOC_ERROR_STREAM,
                            MONGOC_ERROR_STREAM_SOCKET,
                            "Failed to initialize TLS state.");
            return NULL;
         }

         if (!mongoc_stream_tls_do_handshake (base_stream, -1) ||
             !mongoc_stream_tls_check_cert (base_stream, host->host)) {
            bson_set_error (error,
                            MONGOC_ERROR_STREAM,
                            MONGOC_ERROR_STREAM_SOCKET,
                            "Failed to handshake and validate TLS certificate.");
            mongoc_stream_destroy (base_stream);
            base_stream = NULL;
            return NULL;
         }
      }
   }
#endif

   return base_stream ? mongoc_stream_buffered_new (base_stream, 1024) : NULL;
}
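/*
 * Usage sketch (illustrative, not driver source): an application can replace
 * the default connection logic through the public hook
 * mongoc_client_set_stream_initiator(); the callback it installs has the same
 * signature as mongoc_client_default_stream_initiator() above.
 */
static void
example_install_stream_initiator (mongoc_client_t *client,
                                  mongoc_stream_initiator_t initiator,
                                  void *user_data)
{
   /* the client invokes the initiator for every new server connection */
   mongoc_client_set_stream_initiator (client, initiator, user_data);
}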
void
_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     const mongoc_write_concern_t *write_concern,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   uint32_t current_offset;
   mongoc_iovec_t *iov;
   mongoc_rpc_t rpc;
   bson_t *gle = NULL;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   uint32_t n_docs_in_batch;
   uint32_t request_id = 0;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;
   int data_offset = 0;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   started = bson_get_monotonic_time ();
   current_offset = offset;

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 +
                      strlen (collection) + 1);

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);

   while ((bson = bson_reader_read (reader, &eof))) {
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx <= command->n_documents);

      if (bson->len > max_bson_obj_size) {
         /* document is too large */
         bson_t write_err_doc = BSON_INITIALIZER;

         _mongoc_write_command_too_large_error (
            error, idx, bson->len, max_bson_obj_size, &write_err_doc);

         _mongoc_write_result_merge_legacy (result,
                                            command,
                                            &write_err_doc,
                                            client->error_api_version,
                                            MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                                            offset + idx);

         bson_destroy (&write_err_doc);
         data_offset += bson->len;

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - bson->len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
         iov[n_docs_in_batch].iov_len = bson->len;
         size += bson->len;
         n_docs_in_batch++;
         data_offset += bson->len;
      }

      idx++;
   }

   bson_reader_destroy (reader);

   if (n_docs_in_batch) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags =
         ((command->flags.ordered) ? MONGOC_INSERT_NONE
                                   : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         bool err = false;
         bson_iter_t citer;

         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            GOTO (cleanup);
         }

         err = (bson_iter_init_find (&citer, gle, "err") &&
                bson_iter_as_bool (&citer));

         /*
          * Overwrite the "n" field since it will be zero. Otherwise, our
          * merge_legacy code will not know how many we tried in this batch.
          */
         if (!err && bson_iter_init_find (&citer, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&citer) && !bson_iter_int32 (&citer)) {
            bson_iter_overwrite_int32 (&citer, n_docs_in_batch);
         }
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);
      started = bson_get_monotonic_time ();
   }

cleanup:
   if (gle) {
      _mongoc_write_result_merge_legacy (result,
                                         command,
                                         gle,
                                         client->error_api_version,
                                         MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                                         current_offset);
      current_offset = offset + idx;
      bson_destroy (gle);
      gle = NULL;
   }

   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
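/*
 * Usage sketch (illustrative, not driver source): the ordered flag that
 * selects between MONGOC_INSERT_NONE and MONGOC_INSERT_CONTINUE_ON_ERROR
 * above typically originates in application-level bulk operations such as
 * the one below; the document contents are placeholders.
 */
static bool
example_unordered_bulk_insert (mongoc_collection_t *collection,
                               bson_error_t *error)
{
   bson_t reply;
   bson_t *doc = BCON_NEW ("hello", BCON_UTF8 ("world"));
   mongoc_bulk_operation_t *bulk =
      mongoc_collection_create_bulk_operation (collection,
                                               false /* unordered */,
                                               NULL /* write concern */);
   uint32_t server_id;

   mongoc_bulk_operation_insert (bulk, doc);
   server_id = mongoc_bulk_operation_execute (bulk, &reply, error);

   bson_destroy (&reply);
   bson_destroy (doc);
   mongoc_bulk_operation_destroy (bulk);

   /* a server id of zero indicates the bulk write failed */
   return server_id != 0;
}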
/* fire command-succeeded event as if we'd used a modern write command.
 * note, cluster.request_id was incremented once for the write, again
 * for the getLastError, so cluster.request_id is no longer valid; use the
 * passed-in request_id instead.
 */
static void
_mongoc_monitor_legacy_write_succeeded (mongoc_client_t *client,
                                        int64_t duration,
                                        mongoc_write_command_t *command,
                                        const bson_t *gle,
                                        mongoc_server_stream_t *stream,
                                        int64_t request_id)
{
   bson_iter_t iter;
   bson_t doc;
   int64_t ok = 1;
   int64_t n = 0;
   uint32_t code = 8;
   bool wtimeout = false;

   /* server error message */
   const char *errmsg = NULL;
   size_t errmsg_len = 0;

   /* server errInfo subdocument */
   bool has_errinfo = false;
   uint32_t len;
   const uint8_t *data;
   bson_t errinfo;

   /* server upsertedId value */
   bool has_upserted_id = false;
   bson_value_t upserted_id;

   /* server updatedExisting value */
   bool has_updated_existing = false;
   bool updated_existing = false;

   mongoc_apm_command_succeeded_t event;

   ENTRY;

   if (!client->apm_callbacks.succeeded) {
      EXIT;
   }

   /* first extract interesting fields from getlasterror response */
   if (gle) {
      bson_iter_init (&iter, gle);
      while (bson_iter_next (&iter)) {
         if (!strcmp (bson_iter_key (&iter), "ok")) {
            ok = bson_iter_as_int64 (&iter);
         } else if (!strcmp (bson_iter_key (&iter), "n")) {
            n = bson_iter_as_int64 (&iter);
         } else if (!strcmp (bson_iter_key (&iter), "code")) {
            code = (uint32_t) bson_iter_as_int64 (&iter);
            if (code == 0) {
               /* server sent non-numeric error code? */
               code = 8;
            }
         } else if (!strcmp (bson_iter_key (&iter), "upserted")) {
            has_upserted_id = true;
            bson_value_copy (bson_iter_value (&iter), &upserted_id);
         } else if (!strcmp (bson_iter_key (&iter), "updatedExisting")) {
            has_updated_existing = true;
            updated_existing = bson_iter_as_bool (&iter);
         } else if ((!strcmp (bson_iter_key (&iter), "err") ||
                     !strcmp (bson_iter_key (&iter), "errmsg")) &&
                    BSON_ITER_HOLDS_UTF8 (&iter)) {
            errmsg = bson_iter_utf8_unsafe (&iter, &errmsg_len);
         } else if (!strcmp (bson_iter_key (&iter), "errInfo") &&
                    BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            bson_iter_document (&iter, &len, &data);
            bson_init_static (&errinfo, data, len);
            has_errinfo = true;
         } else if (!strcmp (bson_iter_key (&iter), "wtimeout")) {
            wtimeout = true;
         }
      }
   }

   /* based on PyMongo's _convert_write_result() */
   bson_init (&doc);
   bson_append_int32 (&doc, "ok", 2, (int32_t) ok);

   if (errmsg && !wtimeout) {
      /* Failure, but pass to the success callback. Command Monitoring Spec:
       * "Commands that executed on the server and return a status of {ok: 1}
       * are considered successful commands and fire CommandSucceededEvent.
       * Commands that have write errors are included since the actual command
       * did succeed, only writes failed." */
      append_write_err (
         &doc, code, errmsg, errmsg_len, has_errinfo ? &errinfo : NULL);
   } else {
      /* Success, perhaps with a writeConcernError. */
      if (errmsg) {
         append_write_concern_err (&doc, errmsg, errmsg_len);
      }

      if (command->type == MONGOC_WRITE_COMMAND_INSERT) {
         /* GLE result for insert is always 0 in most MongoDB versions. */
         n = command->n_documents;
      } else if (command->type == MONGOC_WRITE_COMMAND_UPDATE) {
         if (has_upserted_id) {
            append_upserted (&doc, &upserted_id);
         } else if (has_updated_existing && !updated_existing && n == 1) {
            bson_t tmp;
            int32_t bson_len = 0;

            memcpy (&bson_len, command->payload.data, 4);
            bson_len = BSON_UINT32_FROM_LE (bson_len);
            bson_init_static (&tmp, command->payload.data, bson_len);
            has_upserted_id = get_upserted_id (&tmp, &upserted_id);

            if (has_upserted_id) {
               append_upserted (&doc, &upserted_id);
            }
         }
      }
   }

   bson_append_int32 (&doc, "n", 1, (int32_t) n);

   mongoc_apm_command_succeeded_init (&event,
                                      duration,
                                      &doc,
                                      _mongoc_command_type_to_name (command->type),
                                      request_id,
                                      command->operation_id,
                                      &stream->sd->host,
                                      stream->sd->id,
                                      client->apm_context);

   client->apm_callbacks.succeeded (&event);

   mongoc_apm_command_succeeded_cleanup (&event);
   bson_destroy (&doc);

   if (has_upserted_id) {
      bson_value_destroy (&upserted_id);
   }

   EXIT;
}
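/*
 * Usage sketch (illustrative, not driver source): the succeeded callback
 * fired above only runs if the application registered command monitoring via
 * the public APM API, roughly as below. example_on_command_succeeded is a
 * hypothetical handler.
 */
static void
example_on_command_succeeded (const mongoc_apm_command_succeeded_t *event)
{
   char *reply_json =
      bson_as_json (mongoc_apm_command_succeeded_get_reply (event), NULL);

   MONGOC_DEBUG ("%s succeeded: %s",
                 mongoc_apm_command_succeeded_get_command_name (event),
                 reply_json);
   bson_free (reply_json);
}

static void
example_enable_command_monitoring (mongoc_client_t *client)
{
   mongoc_apm_callbacks_t *callbacks = mongoc_apm_callbacks_new ();

   mongoc_apm_set_command_succeeded_cb (callbacks,
                                        example_on_command_succeeded);
   mongoc_client_set_apm_callbacks (client, callbacks, NULL /* context */);
   mongoc_apm_callbacks_destroy (callbacks);
}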
void
_mongoc_write_result_merge_legacy (mongoc_write_result_t *result,   /* IN */
                                   mongoc_write_command_t *command, /* IN */
                                   const bson_t *reply,             /* IN */
                                   int32_t error_api_version,
                                   mongoc_error_code_t default_code,
                                   uint32_t offset)
{
   const bson_value_t *value;
   bson_iter_t iter;
   bson_iter_t ar;
   bson_iter_t citer;
   const char *err = NULL;
   int32_t code = 0;
   int32_t n = 0;
   int32_t upsert_idx = 0;
   mongoc_error_domain_t domain;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   domain = error_api_version >= MONGOC_ERROR_API_VERSION_2
               ? MONGOC_ERROR_SERVER
               : MONGOC_ERROR_COLLECTION;

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      n = bson_iter_int32 (&iter);
   }

   if (bson_iter_init_find (&iter, reply, "err") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      err = bson_iter_utf8 (&iter, NULL);
   }

   if (bson_iter_init_find (&iter, reply, "code") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      code = bson_iter_int32 (&iter);
   }

   if (_is_duplicate_key_error (code)) {
      code = MONGOC_ERROR_DUPLICATE_KEY;
   }

   if (code || err) {
      if (!err) {
         err = "unknown error";
      }

      if (bson_iter_init_find (&iter, reply, "wtimeout") &&
          bson_iter_as_bool (&iter)) {
         if (!code) {
            code = (int32_t) MONGOC_ERROR_WRITE_CONCERN_ERROR;
         }

         _append_write_concern_err_legacy (result, err, code);
      } else {
         if (!code) {
            code = (int32_t) default_code;
         }

         _append_write_err_legacy (result, err, domain, code, offset);
      }
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      if (n) {
         result->nInserted += n;
      }
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += n;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:
      if (bson_iter_init_find (&iter, reply, "upserted") &&
          !BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;
         value = bson_iter_value (&iter);
         _mongoc_write_result_append_upsert (result, offset, value);
      } else if (bson_iter_init_find (&iter, reply, "upserted") &&
                 BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;

         if (bson_iter_recurse (&iter, &ar)) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "_id")) {
                  value = bson_iter_value (&citer);
                  _mongoc_write_result_append_upsert (
                     result, offset + upsert_idx, value);
                  upsert_idx++;
               }
            }
         }
      } else if ((n == 1) &&
                 bson_iter_init_find (&iter, reply, "updatedExisting") &&
                 BSON_ITER_HOLDS_BOOL (&iter) && !bson_iter_bool (&iter)) {
         result->nUpserted += n;
      } else {
         result->nMatched += n;
      }
      break;
   default:
      break;
   }

   result->omit_nModified = true;

   EXIT;
}
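/*
 * Illustrative sketch (not driver source) of a legacy getLastError reply that
 * the merge above maps to a duplicate-key write error: code 11000 is
 * normalized to MONGOC_ERROR_DUPLICATE_KEY and the "err" string becomes the
 * write error message. Field values are examples only.
 */
static bson_t *
example_duplicate_key_gle_reply (void)
{
   return BCON_NEW ("ok", BCON_INT32 (1),
                    "n", BCON_INT32 (0),
                    "code", BCON_INT32 (11000),
                    "err", BCON_UTF8 ("E11000 duplicate key error"));
}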
void
mongoc_server_description_handle_ismaster (mongoc_server_description_t *sd,
                                           const bson_t *ismaster_response,
                                           int64_t rtt_msec,
                                           bson_error_t *error)
{
   bson_iter_t iter;
   bool is_master = false;
   bool is_shard = false;
   bool is_secondary = false;
   bool is_arbiter = false;
   bool is_replicaset = false;
   bool is_hidden = false;
   const uint8_t *bytes;
   uint32_t len;
   int num_keys = 0;

   ENTRY;

   BSON_ASSERT (sd);

   mongoc_server_description_reset (sd);
   if (!ismaster_response) {
      EXIT;
   }

   bson_destroy (&sd->last_is_master);
   bson_copy_to (ismaster_response, &sd->last_is_master);
   sd->has_is_master = true;

   bson_iter_init (&iter, &sd->last_is_master);

   while (bson_iter_next (&iter)) {
      num_keys++;
      if (strcmp ("ok", bson_iter_key (&iter)) == 0) {
         /* ismaster responses never have ok: 0, but spec requires we check */
         if (!bson_iter_as_bool (&iter)) goto failure;
      } else if (strcmp ("ismaster", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_master = bson_iter_bool (&iter);
      } else if (strcmp ("maxMessageSizeBytes", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_msg_size = bson_iter_int32 (&iter);
      } else if (strcmp ("maxBsonObjectSize", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_bson_obj_size = bson_iter_int32 (&iter);
      } else if (strcmp ("maxWriteBatchSize", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_write_batch_size = bson_iter_int32 (&iter);
      } else if (strcmp ("minWireVersion", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->min_wire_version = bson_iter_int32 (&iter);
      } else if (strcmp ("maxWireVersion", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_wire_version = bson_iter_int32 (&iter);
      } else if (strcmp ("msg", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) goto failure;
         is_shard = !!bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("setName", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) goto failure;
         sd->set_name = bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("secondary", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_secondary = bson_iter_bool (&iter);
      } else if (strcmp ("hosts", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter)) goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->hosts, bytes, len);
      } else if (strcmp ("passives", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter)) goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->passives, bytes, len);
      } else if (strcmp ("arbiters", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter)) goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->arbiters, bytes, len);
      } else if (strcmp ("primary", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) goto failure;
         sd->current_primary = bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("arbiterOnly", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_arbiter = bson_iter_bool (&iter);
      } else if (strcmp ("isreplicaset", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_replicaset = bson_iter_bool (&iter);
      } else if (strcmp ("tags", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) goto failure;
         bson_iter_document (&iter, &len, &bytes);
         bson_init_static (&sd->tags, bytes, len);
      } else if (strcmp ("hidden", bson_iter_key (&iter)) == 0) {
         is_hidden = bson_iter_bool (&iter);
      }
   }

   if (is_shard) {
      sd->type = MONGOC_SERVER_MONGOS;
   } else if (sd->set_name) {
      if (is_hidden) {
         sd->type = MONGOC_SERVER_RS_OTHER;
      } else if (is_master) {
         sd->type = MONGOC_SERVER_RS_PRIMARY;
      } else if (is_secondary) {
         sd->type = MONGOC_SERVER_RS_SECONDARY;
      } else if (is_arbiter) {
         sd->type = MONGOC_SERVER_RS_ARBITER;
      } else {
         sd->type = MONGOC_SERVER_RS_OTHER;
      }
   } else if (is_replicaset) {
      sd->type = MONGOC_SERVER_RS_GHOST;
   } else if (num_keys > 0) {
      sd->type = MONGOC_SERVER_STANDALONE;
   } else {
      sd->type = MONGOC_SERVER_UNKNOWN;
   }

   mongoc_server_description_update_rtt (sd, rtt_msec);

   EXIT;

failure:
   sd->type = MONGOC_SERVER_UNKNOWN;
   sd->round_trip_time = -1;

   EXIT;
}
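/*
 * Test-style sketch (illustrative, not driver source): a reply carrying
 * "setName" plus "ismaster": true is classified as MONGOC_SERVER_RS_PRIMARY
 * by the logic above. Assumes sd was already initialized elsewhere; BCON and
 * the wire-version values are used only for brevity.
 */
static void
example_classify_primary (mongoc_server_description_t *sd)
{
   bson_t *reply = BCON_NEW ("ok", BCON_INT32 (1),
                             "ismaster", BCON_BOOL (true),
                             "setName", BCON_UTF8 ("rs0"),
                             "minWireVersion", BCON_INT32 (0),
                             "maxWireVersion", BCON_INT32 (5));

   mongoc_server_description_handle_ismaster (sd, reply, 0 /* rtt_msec */, NULL);
   BSON_ASSERT (sd->type == MONGOC_SERVER_RS_PRIMARY);

   bson_destroy (reply);
}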
bool
_mongoc_cluster_auth_node_cyrus (mongoc_cluster_t *cluster,
                                 mongoc_stream_t *stream,
                                 mongoc_server_description_t *sd,
                                 bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   uint32_t buflen = 0;
   mongoc_cyrus_t sasl;
   bson_iter_t iter;
   bool ret = false;
   const char *tmpstr;
   uint8_t buf[4096] = {0};
   bson_t cmd;
   bson_t reply;
   int conv_id = 0;
   mongoc_server_stream_t *server_stream;

   BSON_ASSERT (cluster);
   BSON_ASSERT (stream);

   if (!_mongoc_cyrus_new_from_cluster (
          &sasl, cluster, stream, sd->host.host, error)) {
      return false;
   }

   for (;;) {
      mongoc_cmd_parts_init (
         &parts, cluster->client, "$external", MONGOC_QUERY_SLAVE_OK, &cmd);

      if (!_mongoc_cyrus_step (
             &sasl, buf, buflen, buf, sizeof buf, &buflen, error)) {
         goto failure;
      }

      bson_init (&cmd);

      if (sasl.step == 1) {
         _mongoc_cluster_build_sasl_start (
            &cmd, sasl.credentials.mechanism, (const char *) buf, buflen);
      } else {
         _mongoc_cluster_build_sasl_continue (
            &cmd, conv_id, (const char *) buf, buflen);
      }

      TRACE ("SASL: authenticating (step %d)", sasl.step);

      server_stream = _mongoc_cluster_create_server_stream (
         cluster->client->topology, sd->id, stream, error);

      if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
         mongoc_server_stream_cleanup (server_stream);
         bson_destroy (&cmd);
         goto failure;
      }

      if (!mongoc_cluster_run_command_private (
             cluster, &parts.assembled, &reply, error)) {
         mongoc_server_stream_cleanup (server_stream);
         bson_destroy (&cmd);
         bson_destroy (&reply);
         goto failure;
      }

      mongoc_server_stream_cleanup (server_stream);
      bson_destroy (&cmd);

      if (bson_iter_init_find (&iter, &reply, "done") &&
          bson_iter_as_bool (&iter)) {
         bson_destroy (&reply);
         break;
      }

      conv_id = _mongoc_cluster_get_conversation_id (&reply);

      if (!bson_iter_init_find (&iter, &reply, "payload") ||
          !BSON_ITER_HOLDS_UTF8 (&iter)) {
         MONGOC_DEBUG ("SASL: authentication failed");
         bson_destroy (&reply);
         bson_set_error (error,
                         MONGOC_ERROR_CLIENT,
                         MONGOC_ERROR_CLIENT_AUTHENTICATE,
                         "Received invalid SASL reply from MongoDB server.");
         goto failure;
      }

      tmpstr = bson_iter_utf8 (&iter, &buflen);

      if (buflen > sizeof buf) {
         bson_set_error (error,
                         MONGOC_ERROR_CLIENT,
                         MONGOC_ERROR_CLIENT_AUTHENTICATE,
                         "SASL reply from MongoDB is too large.");
         bson_destroy (&reply);
         goto failure;
      }

      memcpy (buf, tmpstr, buflen);
      bson_destroy (&reply);
      mongoc_cmd_parts_cleanup (&parts);
   }

   TRACE ("%s", "SASL: authenticated");

   ret = true;

failure:
   _mongoc_cyrus_destroy (&sasl);
   mongoc_cmd_parts_cleanup (&parts);

   return ret;
}
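/*
 * Usage sketch (illustrative, not driver source): the Cyrus SASL conversation
 * above runs when a client is configured with a SASL mechanism such as
 * GSSAPI through its connection string; the host and principal below are
 * placeholders ("%40" percent-encodes the '@' in the user name).
 */
static mongoc_client_t *
example_gssapi_client (void)
{
   return mongoc_client_new (
      "mongodb://user%40EXAMPLE.COM@db.example.com/?authMechanism=GSSAPI");
}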