bool _mongoc_convert_int64_positive (mongoc_client_t *client, const bson_iter_t *iter, int64_t *num, bson_error_t *error) { int64_t i; if (!BSON_ITER_HOLDS_NUMBER (iter)) { CONVERSION_ERR ("Invalid field \"%s\" in opts, should contain number," " not %s", bson_iter_key (iter), _mongoc_bson_type_to_str (bson_iter_type (iter))); } i = bson_iter_as_int64 (iter); if (i <= 0) { CONVERSION_ERR ("Invalid field \"%s\" in opts, should be greater than 0," " not %" PRId64, bson_iter_key (iter), i); } *num = bson_iter_as_int64 (iter); return true; }
/*
 * Begin iterating the reply to an "aggregate", "find", "getMore" etc.
 * command:
 *
 *    {cursor: {id: 1234, ns: "db.collection", firstBatch: [...]}}
 *
 * Returns true when a "firstBatch"/"nextBatch" array was found and
 * cid->batch_iter now points into it.
 */
bool
_mongoc_cursor_cursorid_start_batch (mongoc_cursor_t *cursor)
{
   mongoc_cursor_cursorid_t *cid;
   bson_iter_t top_iter;
   bson_iter_t cursor_iter;
   const char *ns;
   uint32_t ns_len;

   cid = (mongoc_cursor_cursorid_t *) cursor->iface_data;
   BSON_ASSERT (cid);

   /* guard clause: no "cursor" subdocument means nothing to iterate */
   if (!bson_iter_init_find (&top_iter, &cid->array, "cursor") ||
       !BSON_ITER_HOLDS_DOCUMENT (&top_iter) ||
       !bson_iter_recurse (&top_iter, &cursor_iter)) {
      return cid->in_batch;
   }

   while (bson_iter_next (&cursor_iter)) {
      if (BSON_ITER_IS_KEY (&cursor_iter, "id")) {
         cursor->rpc.reply.cursor_id = bson_iter_as_int64 (&cursor_iter);
      } else if (BSON_ITER_IS_KEY (&cursor_iter, "ns")) {
         ns = bson_iter_utf8 (&cursor_iter, &ns_len);
         _mongoc_set_cursor_ns (cursor, ns, ns_len);
      } else if (BSON_ITER_IS_KEY (&cursor_iter, "firstBatch") ||
                 BSON_ITER_IS_KEY (&cursor_iter, "nextBatch")) {
         if (BSON_ITER_HOLDS_ARRAY (&cursor_iter) &&
             bson_iter_recurse (&cursor_iter, &cid->batch_iter)) {
            cid->in_batch = true;
         }
      }
   }

   return cid->in_batch;
}
int be_mongo_superuser(void *conf, const char *username) { struct mongo_backend *handle = (struct mongo_backend *) conf; mongoc_collection_t *collection; mongoc_cursor_t *cursor; bson_error_t error; const bson_t *doc; int result; bson_t query; bson_iter_t iter; bson_init (&query); bson_append_utf8(&query, "username", -1, username, -1); collection = mongoc_client_get_collection(handle->client, dbName, colName); cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, &query, NULL, NULL); while (!mongoc_cursor_error (cursor, &error) && mongoc_cursor_more (cursor)) { if (mongoc_cursor_next (cursor, &doc)) { bson_iter_init(&iter, doc); bson_iter_find(&iter, superUser); result = (int64_t) bson_iter_as_int64(&iter); //_log(LOG_NOTICE, "SUPERUSER: %d", result); } } if (mongoc_cursor_error (cursor, &error)) { fprintf (stderr, "Cursor Failure: %s\n", error.message); return result; } bson_destroy (&query); mongoc_cursor_destroy (cursor); mongoc_collection_destroy (collection); return result; }
bool _mongoc_convert_server_id (mongoc_client_t *client, const bson_iter_t *iter, uint32_t *server_id, bson_error_t *error) { int64_t tmp; if (!BSON_ITER_HOLDS_INT (iter)) { CONVERSION_ERR ("The serverId option must be an integer"); } tmp = bson_iter_as_int64 (iter); if (tmp <= 0) { CONVERSION_ERR ("The serverId option must be >= 1"); } *server_id = (uint32_t) tmp; return true; }
static void test_stats (void) { mongoc_collection_t *collection; mongoc_client_t *client; bson_error_t error; bson_iter_t iter; bson_t stats; bson_t doc = BSON_INITIALIZER; bool r; client = mongoc_client_new (gTestUri); ASSERT (client); collection = get_test_collection (client, "test_stats"); ASSERT (collection); r = mongoc_collection_insert (collection, MONGOC_INSERT_NONE, &doc, NULL, &error); assert (r); r = mongoc_collection_stats (collection, NULL, &stats, &error); assert (r); assert (bson_iter_init_find (&iter, &stats, "ns")); assert (bson_iter_init_find (&iter, &stats, "count")); assert (bson_iter_as_int64 (&iter) >= 1); bson_destroy (&stats); r = mongoc_collection_drop (collection, &error); assert (r); mongoc_collection_destroy (collection); mongoc_client_destroy (client); bson_destroy (&doc); }
bool _mongoc_convert_int32_t (mongoc_client_t *client, const bson_iter_t *iter, int32_t *num, bson_error_t *error) { int64_t i; if (!BSON_ITER_HOLDS_NUMBER (iter)) { CONVERSION_ERR ("Invalid field \"%s\" in opts", bson_iter_key (iter)); } i = bson_iter_as_int64 (iter); if (i > INT32_MAX || i < INT32_MIN) { CONVERSION_ERR ("Invalid field \"%s\" in opts: %" PRId64 " out of range for int32", bson_iter_key (iter), i); } *num = (int32_t) i; return true; }
/**
 * _mongoc_gridfs_file_new_from_bson:
 *
 * creates a gridfs file from a bson object
 *
 * This is only really useful for instantiating a gridfs file from a server
 * side object
 *
 * Returns a new file on success; on a malformed document, returns NULL.
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs, const bson_t *data)
{
   mongoc_gridfs_file_t *file;
   const bson_value_t *value;
   const char *key;
   bson_iter_t iter;
   const uint8_t *buf;
   uint32_t buf_len;

   ENTRY;

   BSON_ASSERT (gridfs);
   BSON_ASSERT (data);

   file = (mongoc_gridfs_file_t *) bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   /* keep a private copy of the document: the utf8/array/document pointers
    * extracted below point into file->bson, so they stay valid for the
    * lifetime of the file object */
   bson_copy_to (data, &file->bson);
   bson_iter_init (&iter, &file->bson);

   while (bson_iter_next (&iter)) {
      key = bson_iter_key (&iter);

      if (0 == strcmp (key, "_id")) {
         value = bson_iter_value (&iter);
         bson_value_copy (value, &file->files_id);
      } else if (0 == strcmp (key, "length")) {
         /* length may arrive as int32, int64 or double */
         if (!BSON_ITER_HOLDS_INT32 (&iter) && !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         file->length = bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "chunkSize")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) && !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         /* chunk_size is stored as int32; reject values that would not fit */
         if (bson_iter_as_int64 (&iter) > INT32_MAX) {
            GOTO (failure);
         }
         file->chunk_size = (int32_t) bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "uploadDate")) {
         if (!BSON_ITER_HOLDS_DATE_TIME (&iter)) {
            GOTO (failure);
         }
         file->upload_date = bson_iter_date_time (&iter);
      } else if (0 == strcmp (key, "md5")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_md5 = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "filename")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_filename = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "contentType")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_content_type = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "aliases")) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter)) {
            GOTO (failure);
         }
         /* static view into file->bson, no copy made */
         bson_iter_array (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_aliases, buf, buf_len);
      } else if (0 == strcmp (key, "metadata")) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            GOTO (failure);
         }
         bson_iter_document (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_metadata, buf, buf_len);
      }
   }

   /* TODO: is there are a minimal object we should be verifying that we
    * actually have here? */

   RETURN (file);

failure:
   bson_destroy (&file->bson);
   /* FIX: the file struct itself was leaked on the failure path */
   bson_free (file);

   RETURN (NULL);
}
int be_mongo_aclcheck(void *conf, const char *clientid, const char *username, const char *topic, int acc) { struct mongo_backend *handle = (struct mongo_backend *) conf; mongoc_collection_t *collection; mongoc_cursor_t *cursor; bson_error_t error; const bson_t *doc; bson_iter_t iter; bool check = false; int match = 0, foundFlag = 0; bson_t query; bson_init(&query); bson_append_utf8(&query, "username", -1, username, -1); collection = mongoc_client_get_collection(handle->client, dbName, colName); cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, &query, NULL, NULL); while (!mongoc_cursor_error (cursor, &error) && mongoc_cursor_more (cursor)) { if (foundFlag == 0 && mongoc_cursor_next (cursor, &doc)) { bson_iter_init(&iter, doc); bson_iter_find(&iter, topicLoc); int64_t topId = (int64_t) bson_iter_as_int64(&iter);//, NULL); bson_destroy(&query); mongoc_cursor_destroy(cursor); mongoc_collection_destroy(collection); bson_init(&query); bson_append_int64(&query, topicID, -1, topId); collection = mongoc_client_get_collection(handle->client, dbName, topicLoc); cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, &query, NULL, NULL); foundFlag = 1; } if (foundFlag == 1 && mongoc_cursor_next(cursor, &doc)) { bson_iter_init(&iter, doc); bson_iter_find(&iter, topicLoc); uint32_t len; const uint8_t *arr; bson_iter_array(&iter, &len, &arr); bson_t b; if (bson_init_static(&b, arr, len)) { bson_iter_init(&iter, &b); while (bson_iter_next(&iter)) { char *str = bson_iter_dup_utf8(&iter, &len); mosquitto_topic_matches_sub(str, topic, &check); if (check) { match = 1; bson_free(str); break; } bson_free(str); } } } } if (mongoc_cursor_error (cursor, &error)) { fprintf (stderr, "Cursor Failure: %s\n", error.message); return 0; } bson_destroy(&query); mongoc_cursor_destroy (cursor); mongoc_collection_destroy(collection); return match; }
int be_mongo_aclcheck(void *conf, const char *clientid, const char *username, const char *topic, int acc) { struct mongo_backend *handle = (struct mongo_backend *) conf; mongoc_collection_t *collection; mongoc_cursor_t *cursor; bson_error_t error; const bson_t *doc; bson_iter_t iter; int match = 0; const bson_oid_t *topic_lookup_oid = NULL; const char *topic_lookup_utf8 = NULL; int64_t topic_lookup_int64 = 0; bson_t query; bson_init(&query); bson_append_utf8(&query, handle->user_username_prop, -1, username, -1); collection = mongoc_client_get_collection(handle->client, handle->database, handle->user_coll); cursor = mongoc_collection_find_with_opts(collection, &query, NULL, NULL); if (!mongoc_cursor_error (cursor, &error) && mongoc_cursor_next (cursor, &doc)) { // First find any user[handle->user_topiclist_fk_prop] if (bson_iter_init_find(&iter, doc, handle->user_topiclist_fk_prop)) { bson_type_t loc_id_type = bson_iter_type(&iter); if (loc_id_type == BSON_TYPE_OID) { topic_lookup_oid = bson_iter_oid(&iter); } else if (loc_id_type == BSON_TYPE_INT32 || loc_id_type == BSON_TYPE_INT64) { topic_lookup_int64 = bson_iter_as_int64(&iter); } else if (loc_id_type == BSON_TYPE_UTF8) { topic_lookup_utf8 = bson_iter_utf8(&iter, NULL); } } // Look through the props from the beginning for user[handle->user_topics_prop] if (bson_iter_init_find(&iter, doc, handle->user_topics_prop)) { bson_type_t embedded_prop_type = bson_iter_type(&iter); if (embedded_prop_type == BSON_TYPE_ARRAY) { match = be_mongo_check_acl_topics_array(&iter, topic, clientid, username); } else if (embedded_prop_type == BSON_TYPE_DOCUMENT) { match = be_mongo_check_acl_topics_map(&iter, topic, acc, clientid, username); } } } if ((mongoc_cursor_error (cursor, &error)) && (match != 1)) { fprintf (stderr, "Cursor Failure: %s\n", error.message); } bson_destroy(&query); mongoc_cursor_destroy (cursor); mongoc_collection_destroy(collection); if (!match && (topic_lookup_oid != NULL || topic_lookup_int64 != 0 || 
topic_lookup_utf8 != NULL)) { bson_init(&query); if (topic_lookup_oid != NULL) { bson_append_oid(&query, handle->topiclist_key_prop, -1, topic_lookup_oid); } else if (topic_lookup_int64 != 0) { bson_append_int64(&query, handle->topiclist_key_prop, -1, topic_lookup_int64); } else if (topic_lookup_utf8 != NULL) { bson_append_utf8(&query, handle->topiclist_key_prop, -1, topic_lookup_utf8, -1); } collection = mongoc_client_get_collection(handle->client, handle->database, handle->topiclist_coll); cursor = mongoc_collection_find_with_opts(collection, &query, NULL, NULL); if (!mongoc_cursor_error (cursor, &error) && mongoc_cursor_next(cursor, &doc)) { bson_iter_init(&iter, doc); if (bson_iter_find(&iter, handle->topiclist_topics_prop)) { bson_type_t loc_prop_type = bson_iter_type(&iter); if (loc_prop_type == BSON_TYPE_ARRAY) { match = be_mongo_check_acl_topics_array(&iter, topic, clientid, username); } else if (loc_prop_type == BSON_TYPE_DOCUMENT) { match = be_mongo_check_acl_topics_map(&iter, topic, acc, clientid, username); } } else { _log(LOG_NOTICE, "[mongo] ACL check error - no topic list found for user (%s) in collection (%s)", username, handle->topiclist_coll); } } if ((mongoc_cursor_error (cursor, &error)) && (match != 1)) { fprintf (stderr, "Cursor Failure: %s\n", error.message); } bson_destroy(&query); mongoc_cursor_destroy(cursor); mongoc_collection_destroy(collection); } return (match) ? BACKEND_ALLOW : BACKEND_DEFER; }
/*
 * Execute a delete write command against legacy servers using OP_DELETE.
 *
 * Iterates the payload documents, each of the form
 *    { "q": { <selector> }, "limit": <0 or 1> }
 * and sends one OP_DELETE per document.  When the write concern is
 * acknowledged, a getLastError round trip follows each delete and its reply
 * is merged into *result.  On failure, result->failed is set and *error is
 * filled out.
 */
void
_mongoc_write_command_delete_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   const uint8_t *data;
   mongoc_rpc_t rpc;
   uint32_t request_id;
   bson_iter_t q_iter;
   uint32_t len;
   int64_t limit = 0;
   bson_t *gle = NULL;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   bool r;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_DELETE_FAILED,
                      "Cannot do an empty delete.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      /* the document is like { "q": { <selector> }, limit: <0 or 1> } */
      r = (bson_iter_init (&q_iter, bson) && bson_iter_find (&q_iter, "q") &&
           BSON_ITER_HOLDS_DOCUMENT (&q_iter));

      BSON_ASSERT (r);
      bson_iter_document (&q_iter, &len, &data);
      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);

      /* refuse selectors larger than the server's max BSON size */
      if (len > max_bson_obj_size) {
         _mongoc_write_command_too_large_error (
            error, 0, len, max_bson_obj_size, NULL);
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_DELETE;
      rpc.delete_.zero = 0;
      rpc.delete_.collection = ns;

      /* "limit" is searched from the current position, continuing the
       * iteration that found "q" above */
      if (bson_iter_find (&q_iter, "limit") &&
          (BSON_ITER_HOLDS_INT (&q_iter))) {
         limit = bson_iter_as_int64 (&q_iter);
      }

      rpc.delete_.flags =
         limit ? MONGOC_DELETE_SINGLE_REMOVE : MONGOC_DELETE_NONE;
      rpc.delete_.selector = data;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         /* acknowledged writes: fetch and merge the getLastError reply */
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_DELETE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      /* restart the per-document duration clock */
      started = bson_get_monotonic_time ();
   }

   bson_reader_destroy (reader);

   EXIT;
}
/* fire command-succeeded event as if we'd used a modern write command.
 * note, cluster.request_id was incremented once for the write, again
 * for the getLastError, so cluster.request_id is no longer valid; used the
 * passed-in request_id instead.
 *
 * Builds a synthetic modern-style reply ({ok: .., n: .., writeErrors /
 * writeConcernError / upserted as appropriate}) from the getLastError
 * document and passes it to the application's succeeded callback.
 */
static void
_mongoc_monitor_legacy_write_succeeded (mongoc_client_t *client,
                                        int64_t duration,
                                        mongoc_write_command_t *command,
                                        const bson_t *gle,
                                        mongoc_server_stream_t *stream,
                                        int64_t request_id)
{
   bson_iter_t iter;
   bson_t doc;
   int64_t ok = 1;
   int64_t n = 0;
   uint32_t code = 8;
   bool wtimeout = false;

   /* server error message */
   const char *errmsg = NULL;
   size_t errmsg_len = 0;

   /* server errInfo subdocument */
   bool has_errinfo = false;
   uint32_t len;
   const uint8_t *data;
   bson_t errinfo;

   /* server upsertedId value */
   bool has_upserted_id = false;
   bson_value_t upserted_id;

   /* server updatedExisting value */
   bool has_updated_existing = false;
   bool updated_existing = false;

   mongoc_apm_command_succeeded_t event;

   ENTRY;

   /* nothing to do if the application registered no succeeded callback */
   if (!client->apm_callbacks.succeeded) {
      EXIT;
   }

   /* first extract interesting fields from getlasterror response */
   if (gle) {
      bson_iter_init (&iter, gle);
      while (bson_iter_next (&iter)) {
         if (!strcmp (bson_iter_key (&iter), "ok")) {
            ok = bson_iter_as_int64 (&iter);
         } else if (!strcmp (bson_iter_key (&iter), "n")) {
            n = bson_iter_as_int64 (&iter);
         } else if (!strcmp (bson_iter_key (&iter), "code")) {
            code = (uint32_t) bson_iter_as_int64 (&iter);
            if (code == 0) {
               /* server sent non-numeric error code? fall back to default */
               code = 8;
            }
         } else if (!strcmp (bson_iter_key (&iter), "upserted")) {
            has_upserted_id = true;
            bson_value_copy (bson_iter_value (&iter), &upserted_id);
         } else if (!strcmp (bson_iter_key (&iter), "updatedExisting")) {
            has_updated_existing = true;
            updated_existing = bson_iter_as_bool (&iter);
         } else if ((!strcmp (bson_iter_key (&iter), "err") ||
                     !strcmp (bson_iter_key (&iter), "errmsg")) &&
                    BSON_ITER_HOLDS_UTF8 (&iter)) {
            errmsg = bson_iter_utf8_unsafe (&iter, &errmsg_len);
         } else if (!strcmp (bson_iter_key (&iter), "errInfo") &&
                    BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            /* errinfo is a static view into gle; valid while gle lives */
            bson_iter_document (&iter, &len, &data);
            bson_init_static (&errinfo, data, len);
            has_errinfo = true;
         } else if (!strcmp (bson_iter_key (&iter), "wtimeout")) {
            wtimeout = true;
         }
      }
   }

   /* based on PyMongo's _convert_write_result() */
   bson_init (&doc);
   bson_append_int32 (&doc, "ok", 2, (int32_t) ok);

   if (errmsg && !wtimeout) {
      /* Failure, but pass to the success callback. Command Monitoring Spec:
       * "Commands that executed on the server and return a status of {ok: 1}
       * are considered successful commands and fire CommandSucceededEvent.
       * Commands that have write errors are included since the actual command
       * did succeed, only writes failed." */
      append_write_err (
         &doc, code, errmsg, errmsg_len, has_errinfo ? &errinfo : NULL);
   } else {
      /* Success, perhaps with a writeConcernError. */
      if (errmsg) {
         append_write_concern_err (&doc, errmsg, errmsg_len);
      }

      if (command->type == MONGOC_WRITE_COMMAND_INSERT) {
         /* GLE result for insert is always 0 in most MongoDB versions. */
         n = command->n_documents;
      } else if (command->type == MONGOC_WRITE_COMMAND_UPDATE) {
         if (has_upserted_id) {
            append_upserted (&doc, &upserted_id);
         } else if (has_updated_existing && !updated_existing && n == 1) {
            /* an upsert that created a document: recover the upserted id
             * from the first document of the command payload */
            bson_t tmp;
            int32_t bson_len = 0;

            memcpy (&bson_len, command->payload.data, 4);
            bson_len = BSON_UINT32_FROM_LE (bson_len);
            bson_init_static (&tmp, command->payload.data, bson_len);
            has_upserted_id = get_upserted_id (&tmp, &upserted_id);

            if (has_upserted_id) {
               append_upserted (&doc, &upserted_id);
            }
         }
      }
   }

   bson_append_int32 (&doc, "n", 1, (int32_t) n);

   mongoc_apm_command_succeeded_init (
      &event,
      duration,
      &doc,
      _mongoc_command_type_to_name (command->type),
      request_id,
      command->operation_id,
      &stream->sd->host,
      stream->sd->id,
      client->apm_context);

   client->apm_callbacks.succeeded (&event);

   mongoc_apm_command_succeeded_cleanup (&event);
   bson_destroy (&doc);

   if (has_upserted_id) {
      bson_value_destroy (&upserted_id);
   }

   EXIT;
}
/*
 * Create a change stream on `coll`, appending the user's `pipeline` after
 * the generated $changeStream stage.  Recognized `opts` are split out
 * below; everything else is forwarded to the aggregate command.  Errors
 * during setup are recorded on stream->err rather than reported directly
 * (SET_BSON_OR_ERR / CHANGE_STREAM_ERR presumably set stream->err —
 * confirm against their definitions).
 */
mongoc_change_stream_t *
_mongoc_change_stream_new (const mongoc_collection_t *coll,
                           const bson_t *pipeline,
                           const bson_t *opts)
{
   bool full_doc_set = false;
   mongoc_change_stream_t *stream =
      (mongoc_change_stream_t *) bson_malloc0 (sizeof (mongoc_change_stream_t));

   BSON_ASSERT (coll);
   BSON_ASSERT (pipeline);

   /* -1 means "not set by the user" for both cursor tunables */
   stream->max_await_time_ms = -1;
   stream->batch_size = -1;
   stream->coll = mongoc_collection_copy ((mongoc_collection_t *) coll);
   bson_init (&stream->pipeline_to_append);
   bson_init (&stream->full_document);
   bson_init (&stream->opts);
   bson_init (&stream->resume_token);
   bson_init (&stream->err_doc);

   /*
    * The passed options may consist of:
    * fullDocument: 'default'|'updateLookup', passed to $changeStream stage
    * resumeAfter: optional<Doc>, passed to $changeStream stage
    * maxAwaitTimeMS: Optional<Int64>, set on the cursor
    * batchSize: Optional<Int32>, passed as agg option, {cursor: { batchSize: }}
    * standard command options like "sessionId", "maxTimeMS", or "collation"
    */

   if (opts) {
      bson_iter_t iter;

      if (bson_iter_init_find (&iter, opts, "fullDocument")) {
         SET_BSON_OR_ERR (&stream->full_document, "fullDocument");
         full_doc_set = true;
      }

      if (bson_iter_init_find (&iter, opts, "resumeAfter")) {
         SET_BSON_OR_ERR (&stream->resume_token, "resumeAfter");
      }

      /* non-int32 batchSize values are silently ignored */
      if (bson_iter_init_find (&iter, opts, "batchSize")) {
         if (BSON_ITER_HOLDS_INT32 (&iter)) {
            stream->batch_size = bson_iter_int32 (&iter);
         }
      }

      if (bson_iter_init_find (&iter, opts, "maxAwaitTimeMS") &&
          BSON_ITER_HOLDS_INT (&iter)) {
         stream->max_await_time_ms = bson_iter_as_int64 (&iter);
      }

      /* save the remaining opts for mongoc_collection_read_command_with_opts */
      bson_copy_to_excluding_noinit (opts,
                                     &stream->opts,
                                     "fullDocument",
                                     "resumeAfter",
                                     "batchSize",
                                     "maxAwaitTimeMS",
                                     NULL);
   }

   /* default fullDocument to "default" when the user did not specify one */
   if (!full_doc_set) {
      if (!BSON_APPEND_UTF8 (
             &stream->full_document, "fullDocument", "default")) {
         CHANGE_STREAM_ERR ("fullDocument");
      }
   }

   /* Accept two forms of user pipeline:
    * 1. A document like: { "pipeline": [...] }
    * 2. An array-like document: { "0": {}, "1": {}, ... }
    * If the passed pipeline is invalid, we pass it along and let the server
    * error instead. */
   if (!bson_empty (pipeline)) {
      bson_iter_t iter;
      if (bson_iter_init_find (&iter, pipeline, "pipeline") &&
          BSON_ITER_HOLDS_ARRAY (&iter)) {
         SET_BSON_OR_ERR (&stream->pipeline_to_append, "pipeline");
      } else {
         if (!BSON_APPEND_ARRAY (
                &stream->pipeline_to_append, "pipeline", pipeline)) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      }
   }

   /* only attempt the initial aggregate if option parsing succeeded */
   if (stream->err.code == 0) {
      (void) _make_cursor (stream);
   }

   return stream;
}