mongoc_uri_t * mongoc_uri_copy (const mongoc_uri_t *uri) { mongoc_uri_t *copy; mongoc_host_list_t *iter; BSON_ASSERT (uri); copy = (mongoc_uri_t *)bson_malloc0(sizeof (*copy)); copy->str = bson_strdup (uri->str); copy->username = bson_strdup (uri->username); copy->password = bson_strdup (uri->password); copy->database = bson_strdup (uri->database); copy->read_prefs = mongoc_read_prefs_copy (uri->read_prefs); copy->read_concern = mongoc_read_concern_copy (uri->read_concern); copy->write_concern = mongoc_write_concern_copy (uri->write_concern); for (iter = uri->hosts; iter; iter = iter->next) { mongoc_uri_append_host (copy, iter->host, iter->port); } bson_copy_to (&uri->options, ©->options); bson_copy_to (&uri->credentials, ©->credentials); return copy; }
/*
 * _mongoc_cursor_clone:
 *
 * Create a copy of @cursor that can be iterated independently. Scalar
 * state (command flag, query flags, skip, limit, batch size, namespace
 * length) is copied verbatim; the query document, field projection, and
 * read preferences are deep-copied. The client pointer is shared, not
 * owned. The clone starts with a fresh, empty receive buffer, i.e. it
 * has not contacted the server yet.
 *
 * Returns a newly allocated cursor; the caller must destroy it.
 */
mongoc_cursor_t *
_mongoc_cursor_clone (const mongoc_cursor_t *cursor)
{
   mongoc_cursor_t *clone;

   ENTRY;

   BSON_ASSERT (cursor);

   clone = bson_malloc0 (sizeof *clone);

   /* shallow-copied scalar fields */
   clone->client = cursor->client;
   clone->is_command = cursor->is_command;
   clone->flags = cursor->flags;
   clone->skip = cursor->skip;
   clone->batch_size = cursor->batch_size;
   clone->limit = cursor->limit;
   clone->nslen = cursor->nslen;

   if (cursor->read_prefs) {
      clone->read_prefs = mongoc_read_prefs_copy (cursor->read_prefs);
   }

   /* deep copies: clone must outlive the original */
   bson_copy_to (&cursor->query, &clone->query);
   bson_copy_to (&cursor->fields, &clone->fields);

   /* ns is a fixed-size embedded char array, safe to memcpy whole */
   memcpy (clone->ns, cursor->ns, sizeof clone->ns);

   _mongoc_buffer_init (&clone->buffer, NULL, 0, NULL);

   mongoc_counter_cursors_active_inc ();

   RETURN (clone);
}
/* Verify bson_copy_to for both storage modes of bson_t. */
static void
test_bson_copy_to (void)
{
   bson_t src;
   bson_t dst;
   int n;

   /* Small document: stays in the inline (stack) buffer. */
   bson_init (&src);
   assert (bson_append_int32 (&src, "foobar", -1, 1234));
   bson_copy_to (&src, &dst);
   assert_bson_equal (&src, &dst);
   bson_destroy (&dst);
   bson_destroy (&src);

   /* Large document: forces growth into a malloced buffer. */
   bson_init (&src);
   for (n = 0; n < 1000; n++) {
      assert (bson_append_int32 (&src, "foobar", -1, 1234));
   }
   bson_copy_to (&src, &dst);
   assert_bson_equal (&src, &dst);
   bson_destroy (&dst);
   bson_destroy (&src);
}
/**
 * _mongoc_gridfs_file_new:
 *
 * Create a new empty gridfs file.
 *
 * @gridfs: owning gridfs handle (not copied; must outlive the file).
 * @opt: optional settings; NULL means all defaults.
 *
 * The file gets a freshly generated ObjectId, an upload date of "now"
 * (milliseconds since epoch), and is marked dirty so it will be flushed
 * on save. Strings and BSON documents from @opt are deep-copied.
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new (mongoc_gridfs_t *gridfs, mongoc_gridfs_file_opt_t *opt)
{
   mongoc_gridfs_file_t *file;
   mongoc_gridfs_file_opt_t default_opt = { 0 };

   ENTRY;

   BSON_ASSERT (gridfs);

   if (!opt) {
      opt = &default_opt;
   }

   file = (mongoc_gridfs_file_t *) bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   file->is_dirty = 1;

   if (opt->chunk_size) {
      file->chunk_size = opt->chunk_size;
   } else {
      /*
       * The default chunk size is now 255kb. This used to be 256k but has been
       * reduced to allow for them to fit within power of two sizes in mongod.
       *
       * See CDRIVER-322.
       */
      file->chunk_size = (1 << 18) - 1024;
   }

   /* new files are identified by a generated ObjectId */
   file->files_id.value_type = BSON_TYPE_OID;
   bson_oid_init (&file->files_id.value.v_oid, NULL);

   /* seconds -> milliseconds, the unit stored in the files collection */
   file->upload_date = time (NULL) * 1000;

   if (opt->md5) {
      file->md5 = bson_strdup (opt->md5);
   }

   if (opt->filename) {
      file->filename = bson_strdup (opt->filename);
   }

   if (opt->content_type) {
      file->content_type = bson_strdup (opt->content_type);
   }

   if (opt->aliases) {
      bson_copy_to (opt->aliases, &(file->aliases));
   }

   if (opt->metadata) {
      bson_copy_to (opt->metadata, &(file->metadata));
   }

   RETURN (file);
}
/**
 * _mongoc_gridfs_file_new:
 *
 * Create a new empty gridfs file.
 *
 * @gridfs: owning gridfs handle (not copied; must outlive the file).
 * @opt: optional settings; NULL means all defaults.
 *
 * The file gets a freshly generated ObjectId, an upload date of "now"
 * (milliseconds since epoch), and is marked dirty so it will be flushed
 * on save. Strings and BSON documents from @opt are deep-copied.
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new (mongoc_gridfs_t *gridfs, mongoc_gridfs_file_opt_t *opt)
{
   mongoc_gridfs_file_t *file;
   mongoc_gridfs_file_opt_t default_opt = { 0 };

   ENTRY;

   BSON_ASSERT (gridfs);

   if (!opt) {
      opt = &default_opt;
   }

   file = bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   file->is_dirty = 1;

   if (opt->chunk_size) {
      file->chunk_size = opt->chunk_size;
   } else {
      /** default chunk size is 256k */
      /* NOTE(review): later driver revisions reduce this to 255kb
       * ((1 << 18) - 1024) so chunks plus overhead fit power-of-two
       * allocations in mongod; see CDRIVER-322. */
      file->chunk_size = 2 << 17;
   }

   bson_oid_init (&file->files_id, NULL);

   /* seconds -> milliseconds, the unit stored in the files collection */
   file->upload_date = time (NULL) * 1000;

   if (opt->md5) {
      file->md5 = bson_strdup (opt->md5);
   }

   if (opt->filename) {
      file->filename = bson_strdup (opt->filename);
   }

   if (opt->content_type) {
      file->content_type = bson_strdup (opt->content_type);
   }

   if (opt->aliases) {
      bson_copy_to (opt->aliases, &(file->aliases));
   }

   if (opt->metadata) {
      bson_copy_to (opt->metadata, &(file->metadata));
   }

   RETURN (file);
}
/*
 * mongoc_uri_bson_append_or_replace_key:
 *
 * Set @option to the UTF-8 string @value inside @options. Existing
 * entries whose key matches @option case-insensitively are replaced
 * (keeping their position); if no entry matches, the option is appended.
 *
 * Because bson_t does not support in-place value replacement, the
 * document is rebuilt into a temporary and copied back over @options.
 */
static void
mongoc_uri_bson_append_or_replace_key (bson_t *options, const char *option, const char *value)
{
   bson_iter_t iter;
   bool found = false;

   if (bson_iter_init (&iter, options)) {
      bson_t tmp = BSON_INITIALIZER;

      while (bson_iter_next (&iter)) {
         const bson_value_t *bvalue;

         /* case-insensitive match: substitute the new value */
         if (!strcasecmp (bson_iter_key (&iter), option)) {
            bson_append_utf8 (&tmp, option, -1, value, -1);
            found = true;
            continue;
         }

         /* any other key is copied through unchanged */
         bvalue = bson_iter_value (&iter);
         BSON_APPEND_VALUE (&tmp, bson_iter_key (&iter), bvalue);
      }

      if (! found) {
         bson_append_utf8 (&tmp, option, -1, value, -1);
      }

      /* replace @options in place: free old buffers, re-init as a copy */
      bson_destroy (options);
      bson_copy_to (&tmp, options);
      bson_destroy (&tmp);
   }
}
mongoc_matcher_t * mongoc_matcher_new (const bson_t *query, /* IN */ bson_error_t *error) /* OUT */ { mongoc_matcher_op_t *op; mongoc_matcher_t *matcher; bson_iter_t iter; BSON_ASSERT (query); matcher = bson_malloc0 (sizeof *matcher); bson_copy_to (query, &matcher->query); if (!bson_iter_init (&iter, &matcher->query)) { goto failure; } if (!(op = _mongoc_matcher_parse_logical (MONGOC_MATCHER_OPCODE_AND, &iter, true, error))) { goto failure; } matcher->optree = op; return matcher; failure: bson_destroy (&matcher->query); bson_free (matcher); return NULL; }
/*
 * _mongoc_convert_document:
 *
 * Overwrite @doc with a copy of the document that @iter currently points
 * to. Returns true on success.
 *
 * NOTE(review): CONVERSION_ERR and BSON_ERR are macros defined elsewhere;
 * they appear to fill @error and return false from this function on the
 * type-mismatch and corrupt-BSON paths -- confirm against their
 * definitions. @client is unused in the visible body (presumably consumed
 * by one of the macros, or kept for signature uniformity).
 */
bool
_mongoc_convert_document (mongoc_client_t *client,
                          const bson_iter_t *iter,
                          bson_t *doc,
                          bson_error_t *error)
{
   uint32_t len;
   const uint8_t *data;
   bson_t value;

   if (!BSON_ITER_HOLDS_DOCUMENT (iter)) {
      CONVERSION_ERR ("Invalid field \"%s\" in opts, should contain document,"
                      " not %s",
                      bson_iter_key (iter),
                      _mongoc_bson_type_to_str (bson_iter_type (iter)));
   }

   /* borrow the raw bytes of the sub-document, then validate them */
   bson_iter_document (iter, &len, &data);
   if (!bson_init_static (&value, data, len)) {
      BSON_ERR ("Corrupt BSON in field \"%s\" in opts", bson_iter_key (iter));
   }

   /* @doc must already be initialized: destroy, then re-init as a copy */
   bson_destroy (doc);
   bson_copy_to (&value, doc);

   return true;
}
/*
 * _mongoc_write_command_init_bulk:
 *
 * Initialize @command for a bulk write of the given @type. The command
 * options @opts are deep-copied when non-empty; otherwise cmd_opts starts
 * as an empty document. The document payload buffer starts empty.
 */
void
_mongoc_write_command_init_bulk (mongoc_write_command_t *command,
                                 int type,
                                 mongoc_bulk_write_flags_t flags,
                                 int64_t operation_id,
                                 const bson_t *opts)
{
   ENTRY;

   BSON_ASSERT (command);

   command->type = type;
   command->flags = flags;
   command->operation_id = operation_id;

   if (bson_empty0 (opts)) {
      bson_init (&command->cmd_opts);
   } else {
      bson_copy_to (opts, &command->cmd_opts);
   }

   _mongoc_buffer_init (&command->payload, NULL, 0, NULL, NULL);
   command->n_documents = 0;

   EXIT;
}
/* enqueue server reply for this connection's worker thread to send to client */
void
mock_server_reply_multi (request_t *request,
                         mongoc_reply_flags_t flags,
                         const bson_t *docs,
                         int n_docs,
                         int64_t cursor_id)
{
   reply_t *reply;
   int idx;

   BSON_ASSERT (request);

   reply = bson_malloc0 (sizeof (reply_t));

   /* metadata describing how and where to send the reply */
   reply->flags = flags;
   reply->cursor_id = cursor_id;
   reply->client_port = request_get_client_port (request);
   reply->request_opcode = (mongoc_opcode_t) request->request_rpc.header.opcode;
   reply->query_flags = (mongoc_query_flags_t) request->request_rpc.query.flags;
   reply->response_to = request->request_rpc.header.request_id;

   /* deep-copy the documents; the caller keeps ownership of @docs */
   reply->n_docs = n_docs;
   reply->docs = bson_malloc0 (n_docs * sizeof (bson_t));
   for (idx = 0; idx < n_docs; idx++) {
      bson_copy_to (&docs[idx], &reply->docs[idx]);
   }

   q_put (request->replies, reply);
}
/*
 * mongoc_find_and_modify_opts_get_extra:
 *
 * Copy the "extra" options document of @opts into @extra. @extra must be
 * uninitialized; the caller owns it afterwards and must bson_destroy it.
 */
void
mongoc_find_and_modify_opts_get_extra (const mongoc_find_and_modify_opts_t *opts,
                                       bson_t *extra)
{
   BSON_ASSERT (opts);
   BSON_ASSERT (extra);

   bson_copy_to (&opts->extra, extra);
}
/* Clone hook for OP_QUERY find cursors: give @dst its own private copy of
 * the filter and a fresh legacy-response state. */
static void
_clone (mongoc_cursor_impl_t *dst, const mongoc_cursor_impl_t *src)
{
   data_find_opquery_t *src_data = (data_find_opquery_t *) src->data;
   data_find_opquery_t *dst_data = bson_malloc0 (sizeof (data_find_opquery_t));

   _mongoc_cursor_response_legacy_init (&dst_data->response_legacy);
   bson_copy_to (&src_data->filter, &dst_data->filter);

   dst->data = dst_data;
}
/* Clone hook for command cursors: give @dst its own private copy of the
 * command document, an empty reply, and fresh legacy-response state. */
static void
_clone (mongoc_cursor_impl_t *dst, const mongoc_cursor_impl_t *src)
{
   data_cmd_t *src_data = (data_cmd_t *) src->data;
   data_cmd_t *dst_data = bson_malloc0 (sizeof (data_cmd_t));

   bson_init (&dst_data->response.reply);
   _mongoc_cursor_response_legacy_init (&dst_data->response_legacy);
   bson_copy_to (&src_data->cmd, &dst_data->cmd);

   dst->data = dst_data;
}
/*
 * mongoc_read_prefs_copy:
 *
 * Deep-copy @read_prefs (mode and tag sets). NULL-safe: a NULL input
 * yields a NULL result. The caller owns the returned object.
 */
mongoc_read_prefs_t *
mongoc_read_prefs_copy (const mongoc_read_prefs_t *read_prefs)
{
   mongoc_read_prefs_t *copy;

   if (!read_prefs) {
      return NULL;
   }

   copy = mongoc_read_prefs_new (read_prefs->mode);
   bson_copy_to (&read_prefs->tags, &copy->tags);

   return copy;
}
/**
 * _mongoc_gridfs_file_new_from_bson:
 *
 * creates a gridfs file from a bson object
 *
 * This is only really useful for instantiating a gridfs file from a server
 * side object
 *
 * Ownership note: @data is deep-copied into file->bson; the string fields
 * (bson_md5, bson_filename, bson_content_type) and the aliases/metadata
 * views point into that copy and stay valid for the life of the file.
 *
 * NOTE(review): no type checking is performed on the iterated fields, so
 * a malformed server document could yield bogus values here; a later
 * revision of this function validates each field's BSON type.
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs, const bson_t *data)
{
   mongoc_gridfs_file_t *file;
   const char *key;
   bson_iter_t iter;
   const uint8_t *buf;
   uint32_t buf_len;

   ENTRY;

   BSON_ASSERT (gridfs);
   BSON_ASSERT (data);

   file = bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   bson_copy_to (data, &file->bson);

   bson_iter_init (&iter, &file->bson);

   /* map each known key of the files-collection document onto the struct */
   while (bson_iter_next (&iter)) {
      key = bson_iter_key (&iter);

      if (0 == strcmp (key, "_id")) {
         bson_oid_copy (bson_iter_oid (&iter), &file->files_id);
      } else if (0 == strcmp (key, "length")) {
         file->length = bson_iter_int64 (&iter);
      } else if (0 == strcmp (key, "chunkSize")) {
         file->chunk_size = bson_iter_int32 (&iter);
      } else if (0 == strcmp (key, "uploadDate")) {
         file->upload_date = bson_iter_date_time (&iter);
      } else if (0 == strcmp (key, "md5")) {
         file->bson_md5 = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "filename")) {
         file->bson_filename = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "contentType")) {
         file->bson_content_type = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "aliases")) {
         /* zero-copy view into file->bson */
         bson_iter_array (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_aliases, buf, buf_len);
      } else if (0 == strcmp (key, "metadata")) {
         /* zero-copy view into file->bson */
         bson_iter_document (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_metadata, buf, buf_len);
      }
   }

   /* TODO: is there are a minimal object we should be verifying that we
    * actually have here? */

   RETURN (file);
}
/*
 * mongoc_client_pool_get_metadata:
 *
 * Copy the pool's isMaster handshake metadata document into @buf. @buf
 * must be uninitialized; the caller owns the copy afterwards.
 */
void
mongoc_client_pool_get_metadata (mongoc_client_pool_t *pool, bson_t *buf)
{
   ENTRY;

   BSON_ASSERT (buf);

   /* hold the pool lock while reading shared topology-scanner state */
   mongoc_mutex_lock (&pool->mutex);
   bson_copy_to (&pool->topology->scanner->ismaster_metadata, buf);
   mongoc_mutex_unlock (&pool->mutex);

   EXIT;
}
/*
 * mongoc_find_and_modify_opts_get_sort:
 *
 * Copy the configured sort document into @sort, or initialize @sort as an
 * empty document when no sort was set. @sort must be uninitialized; the
 * caller owns it afterwards.
 */
void
mongoc_find_and_modify_opts_get_sort (const mongoc_find_and_modify_opts_t *opts,
                                      bson_t *sort)
{
   BSON_ASSERT (opts);
   BSON_ASSERT (sort);

   if (!opts->sort) {
      bson_init (sort);
      return;
   }

   bson_copy_to (opts->sort, sort);
}
/*
 * mongoc_find_and_modify_opts_get_fields:
 *
 * Copy the configured field projection into @fields, or initialize
 * @fields as an empty document when none was set. @fields must be
 * uninitialized; the caller owns it afterwards.
 */
void
mongoc_find_and_modify_opts_get_fields (const mongoc_find_and_modify_opts_t *opts,
                                        bson_t *fields)
{
   BSON_ASSERT (opts);
   BSON_ASSERT (fields);

   if (!opts->fields) {
      bson_init (fields);
      return;
   }

   bson_copy_to (opts->fields, fields);
}
/*
 * mongoc_find_and_modify_opts_get_update:
 *
 * Copy the configured update document into @update, or initialize @update
 * as an empty document when none was set. @update must be uninitialized;
 * the caller owns it afterwards.
 */
void
mongoc_find_and_modify_opts_get_update (const mongoc_find_and_modify_opts_t *opts,
                                        bson_t *update)
{
   BSON_ASSERT (opts);
   BSON_ASSERT (update);

   if (!opts->update) {
      bson_init (update);
      return;
   }

   bson_copy_to (opts->update, update);
}
/*
 * mongoc_read_prefs_set_tags:
 *
 * Replace the tag sets of @read_prefs. A NULL @tags clears them to an
 * empty document; otherwise @tags is deep-copied.
 */
void
mongoc_read_prefs_set_tags (mongoc_read_prefs_t *read_prefs,
                            const bson_t *tags)
{
   BSON_ASSERT (read_prefs);

   /* drop the current tags, then either copy the new set or leave empty */
   bson_destroy (&read_prefs->tags);

   if (!tags) {
      bson_init (&read_prefs->tags);
   } else {
      bson_copy_to (tags, &read_prefs->tags);
   }
}
/*
 * mongoc_read_prefs_set_tags:
 *
 * Replace the tag sets of @read_prefs. A NULL @tags clears them to an
 * empty document; otherwise @tags is deep-copied.
 */
void
mongoc_read_prefs_set_tags (mongoc_read_prefs_t *read_prefs,
                            const bson_t *tags)
{
   /* FIX: use BSON_ASSERT instead of the deprecated/removed
    * bson_return_if_fail macro, matching the precondition style used by
    * the other variant of this function in this codebase. */
   BSON_ASSERT (read_prefs);

   bson_destroy(&read_prefs->tags);

   if (tags) {
      bson_copy_to(tags, &read_prefs->tags);
   } else {
      bson_init(&read_prefs->tags);
   }
}
/*
 * ha_replica_set_get_status:
 *
 * Test helper: run "replSetGetStatus" against each node of @replica_set
 * in turn, over a fresh single-node connection, until one answers. The
 * first reply is deep-copied into @status. Returns TRUE if any node
 * replied, FALSE otherwise. Uses the pre-1.0 API (bson_bool_t,
 * TRUE/FALSE, (key, -1, value)-style bson_append_int32).
 */
static bson_bool_t
ha_replica_set_get_status (ha_replica_set_t *replica_set,
                           bson_t *status)
{
   mongoc_database_t *db;
   mongoc_client_t *client;
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bson_bool_t ret = FALSE;
   ha_node_t *node;
   bson_t cmd;
   char *uristr;

   /* { "replSetGetStatus": 1 } -- small enough to stay in the inline
    * buffer, so the missing bson_destroy at the end leaks no heap */
   bson_init(&cmd);
   bson_append_int32(&cmd, "replSetGetStatus", -1, 1);

   /* stop at the first node that yields a reply */
   for (node = replica_set->nodes; !ret && node; node = node->next) {
      /* slaveOk so secondaries will answer too */
      uristr = bson_strdup_printf("mongodb://127.0.0.1:%hu/?slaveOk=true",
                                  node->port);
      client = mongoc_client_new(uristr);
#ifdef MONGOC_ENABLE_SSL
      if (replica_set->ssl_opt) {
         mongoc_client_set_ssl_opts(client, replica_set->ssl_opt);
      }
#endif
      bson_free(uristr);

      db = mongoc_client_get_database(client, "admin");

      if ((cursor = mongoc_database_command(db,
                                            MONGOC_QUERY_SLAVE_OK,
                                            0,
                                            1,
                                            &cmd,
                                            NULL,
                                            NULL))) {
         if (mongoc_cursor_next(cursor, &doc)) {
            /* deep-copy: @doc is owned by the cursor */
            bson_copy_to(doc, status);
            ret = TRUE;
         }
         mongoc_cursor_destroy(cursor);
      }

      mongoc_database_destroy(db);
      mongoc_client_destroy(client);
   }

   return ret;
}
/*
 * _mongoc_cursor_cursorid_init:
 *
 * Turn @cursor into a cursorid-style cursor: store a deep copy of
 * @command as its filter and install the cursorid interface vtable.
 */
void
_mongoc_cursor_cursorid_init (mongoc_cursor_t *cursor,
                              const bson_t *command)
{
   ENTRY;

   /* replace whatever filter the cursor held with the command document */
   bson_destroy (&cursor->filter);
   bson_copy_to (command, &cursor->filter);

   cursor->iface_data = _mongoc_cursor_cursorid_new ();

   /* swap in the cursorid implementation of the cursor interface */
   memcpy (&cursor->iface, &gMongocCursorCursorid,
           sizeof (mongoc_cursor_interface_t));

   EXIT;
}
/*
 * mongoc_async_cmd_new:
 *
 * Allocate an async command, deep-copy @cmd, build the wire message for
 * database @dbname, start its connection state machine, and append it to
 * @async's command list (bumping ncmds). Returns the new command; it is
 * owned by @async's list.
 *
 * @stream/@dns_result/@setup_ctx/@cb_data are stored as-is (not copied);
 * their lifetimes are managed by the caller / later callbacks.
 */
mongoc_async_cmd_t *
mongoc_async_cmd_new (mongoc_async_t *async,
                      mongoc_stream_t *stream,
                      bool is_setup_done,
                      struct addrinfo *dns_result,
                      mongoc_async_cmd_initiate_t initiator,
                      int64_t initiate_delay_ms,
                      mongoc_async_cmd_setup_t setup,
                      void *setup_ctx,
                      const char *dbname,
                      const bson_t *cmd,
                      mongoc_async_cmd_cb_t cb,
                      void *cb_data,
                      int64_t timeout_msec)
{
   mongoc_async_cmd_t *acmd;

   BSON_ASSERT (cmd);
   BSON_ASSERT (dbname);

   acmd = (mongoc_async_cmd_t *) bson_malloc0 (sizeof (*acmd));
   acmd->async = async;
   acmd->dns_result = dns_result;
   acmd->timeout_msec = timeout_msec;
   acmd->stream = stream;
   acmd->initiator = initiator;
   acmd->initiate_delay_ms = initiate_delay_ms;
   acmd->setup = setup;
   acmd->setup_ctx = setup_ctx;
   acmd->cb = cb;
   acmd->data = cb_data;
   /* timestamp used for connection-timeout accounting */
   acmd->connect_started = bson_get_monotonic_time ();
   bson_copy_to (cmd, &acmd->cmd);

   _mongoc_array_init (&acmd->array, sizeof (mongoc_iovec_t));
   _mongoc_buffer_init (&acmd->buffer, NULL, 0, NULL, NULL);

   /* serialize the command into the send buffers before starting */
   _mongoc_async_cmd_init_send (acmd, dbname);

   _mongoc_async_cmd_state_start (acmd, is_setup_done);

   async->ncmds++;
   DL_APPEND (async->cmds, acmd);

   return acmd;
}
/*
 * _mongoc_cursor_array_set_bson:
 *
 * Install @bson as a synthetic result document on an array cursor and
 * position the array iterator at its start. @bson is deep-copied.
 */
void
_mongoc_cursor_array_set_bson (mongoc_cursor_t *cursor,
                               const bson_t *bson)
{
   mongoc_cursor_array_t *arr;

   ENTRY;

   arr = (mongoc_cursor_array_t *) cursor->iface_data;

   bson_copy_to (bson, &arr->bson);

   /* mark the array as present and synthetic (not fetched from a server) */
   arr->has_array = true;
   arr->has_synthetic_bson = true;

   bson_iter_init (&arr->iter, &arr->bson);
}
/*
 * _mongoc_client_command_simple_with_hint:
 *
 * Run @command against @db_name via a one-shot cursor pinned to server
 * id @hint. On success, the first result document is deep-copied into
 * @reply (if non-NULL); on failure, @reply is initialized empty and
 * @error is filled from the cursor. Returns true iff a document came
 * back.
 */
bool
_mongoc_client_command_simple_with_hint (mongoc_client_t *client,
                                         const char *db_name,
                                         const bson_t *command,
                                         const mongoc_read_prefs_t *read_prefs,
                                         bool is_write_command,
                                         bson_t *reply,
                                         uint32_t hint,
                                         bson_error_t *error)
{
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bool ret;

   BSON_ASSERT (client);
   BSON_ASSERT (db_name);
   BSON_ASSERT (command);

   /* one-shot: skip 0, limit 1, batch_size 0, no projection */
   cursor = mongoc_client_command (client, db_name, MONGOC_QUERY_NONE, 0, 1,
                                   0, command, NULL, read_prefs);
   cursor->hint = hint;
   cursor->is_write_command = is_write_command ? 1 : 0;

   ret = mongoc_cursor_next (cursor, &doc);

   if (reply) {
      if (ret) {
         /* deep-copy: @doc is owned by the cursor we are about to destroy */
         bson_copy_to (doc, reply);
      } else {
         bson_init (reply);
      }
   }

   if (!ret) {
      mongoc_cursor_error (cursor, error);
   }

   mongoc_cursor_destroy (cursor);

   return ret;
}
/*
 * _mongoc_cursor_array_init:
 *
 * Turn @cursor into an array cursor that iterates the array under
 * @field_name of a command reply. When @command is non-NULL it replaces
 * the cursor's query with a deep copy of it.
 */
void
_mongoc_cursor_array_init (mongoc_cursor_t *cursor,
                           const bson_t *command,
                           const char *field_name)
{
   ENTRY;

   if (command != NULL) {
      bson_destroy (&cursor->query);
      bson_copy_to (command, &cursor->query);
   }

   cursor->iface_data = _mongoc_cursor_array_new (field_name);

   /* swap in the array implementation of the cursor interface */
   memcpy (&cursor->iface, &gMongocCursorArray,
           sizeof (mongoc_cursor_interface_t));

   EXIT;
}
/*
 * Count the number of documents.
 *
 * Runs the "count" command (optionally restricted by query @b) on
 * @database.@collection and returns the value of the reply's "n" field,
 * or 0 if the command failed or produced no reply.
 */
double
MongoAggregateCount(MONGO_CONN* conn, const char* database, const char* collection, const BSON* b)
{
	BSON *command = NULL;
	BSON *reply = NULL;
	BSON *doc = NULL;
	double count = 0;
	mongoc_cursor_t *cursor = NULL;
	bool ret = false;

	command = BsonCreate();
	reply = BsonCreate();
	BsonAppendUTF8(command, "count", (char*)collection);
	if (b) /* not empty */
		BsonAppendBson(command, "query", (BSON*)b);

	BsonFinish(command);

	cursor = mongoc_client_command(conn, database, MONGOC_QUERY_SLAVE_OK, 0, 1, 0, command, NULL, NULL);
	if (cursor)
	{
		ret = mongoc_cursor_next(cursor, (const BSON**)&doc);
		if (ret)
		{
			bson_iter_t it;

			/* deep-copy before the cursor (which owns @doc) goes away */
			bson_copy_to(doc, reply);
			if (bson_iter_init_find(&it, reply, "n"))
				count = BsonIterDouble(&it);
			/*
			 * FIX: do not BsonDestroy(doc) here. The document returned by
			 * mongoc_cursor_next() is owned by the cursor and is released
			 * by mongoc_cursor_destroy(); destroying it here caused a
			 * double free / use-after-free.
			 */
		}
		mongoc_cursor_destroy(cursor);
	}
	BsonDestroy(reply);
	BsonDestroy(command);
	return count;
}
mongoc_cursor_t * _mongoc_cursor_new (mongoc_client_t *client, const char *db_and_collection, mongoc_query_flags_t flags, bson_uint32_t skip, bson_uint32_t limit, bson_uint32_t batch_size, bson_bool_t is_command, const bson_t *query, const bson_t *fields, const mongoc_read_prefs_t *read_prefs) { mongoc_read_mode_t mode; mongoc_cursor_t *cursor; const bson_t *tags; const char *mode_str; bson_t child; ENTRY; BSON_ASSERT(client); BSON_ASSERT(db_and_collection); BSON_ASSERT(query); /* we can't have exhaust queries with limits */ BSON_ASSERT (!((flags & MONGOC_QUERY_EXHAUST) && limit)); /* we can't have exhaust queries with sharded clusters */ BSON_ASSERT (!((flags & MONGOC_QUERY_EXHAUST) && client->cluster.isdbgrid)); /* * Cursors execute their query lazily. This sadly means that we must copy * some extra data around between the bson_t structures. This should be * small in most cases, so it reduces to a pure memcpy. The benefit to this * design is simplified error handling by API consumers. 
*/ cursor = bson_malloc0(sizeof *cursor); cursor->client = client; strncpy(cursor->ns, db_and_collection, sizeof cursor->ns - 1); cursor->nslen = strlen(cursor->ns); cursor->flags = flags; cursor->skip = skip; cursor->limit = limit; cursor->batch_size = cursor->batch_size; cursor->is_command = is_command; if (!bson_has_field (query, "$query")) { bson_init (&cursor->query); bson_append_document (&cursor->query, "$query", 6, query); } else { bson_copy_to (query, &cursor->query); } if (read_prefs) { cursor->read_prefs = mongoc_read_prefs_copy (read_prefs); mode = mongoc_read_prefs_get_mode (read_prefs); tags = mongoc_read_prefs_get_tags (read_prefs); if (mode != MONGOC_READ_PRIMARY) { flags |= MONGOC_QUERY_SLAVE_OK; if ((mode != MONGOC_READ_SECONDARY_PREFERRED) || tags) { bson_append_document_begin (&cursor->query, "$readPreference", 15, &child); mode_str = _mongoc_cursor_get_read_mode_string (mode); bson_append_utf8 (&child, "mode", 4, mode_str, -1); if (tags) { bson_append_array (&child, "tags", 4, tags); } bson_append_document_end (&cursor->query, &child); } } } if (fields) { bson_copy_to(fields, &cursor->fields); } else { bson_init(&cursor->fields); } _mongoc_buffer_init(&cursor->buffer, NULL, 0, NULL); mongoc_counter_cursors_active_inc(); RETURN(cursor); }
/**
 * _mongoc_gridfs_file_new_from_bson:
 *
 * creates a gridfs file from a bson object
 *
 * This is only really useful for instantiating a gridfs file from a server
 * side object
 *
 * Each known key of the files-collection document is validated for the
 * expected BSON type; on any mismatch the partially-built file is torn
 * down and NULL is returned. String fields and the aliases/metadata views
 * point into file->bson (a deep copy of @data) and remain valid for the
 * life of the file.
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs, const bson_t *data)
{
   mongoc_gridfs_file_t *file;
   const bson_value_t *value;
   const char *key;
   bson_iter_t iter;
   const uint8_t *buf;
   uint32_t buf_len;

   ENTRY;

   BSON_ASSERT (gridfs);
   BSON_ASSERT (data);

   file = (mongoc_gridfs_file_t *) bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   bson_copy_to (data, &file->bson);

   bson_iter_init (&iter, &file->bson);

   while (bson_iter_next (&iter)) {
      key = bson_iter_key (&iter);

      if (0 == strcmp (key, "_id")) {
         value = bson_iter_value (&iter);
         bson_value_copy (value, &file->files_id);
      } else if (0 == strcmp (key, "length")) {
         /* any numeric type is acceptable for length */
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         file->length = bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "chunkSize")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         /* chunk_size is int32_t: reject values that would overflow */
         if (bson_iter_as_int64 (&iter) > INT32_MAX) {
            GOTO (failure);
         }
         file->chunk_size = (int32_t)bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "uploadDate")) {
         if (!BSON_ITER_HOLDS_DATE_TIME (&iter)){
            GOTO (failure);
         }
         file->upload_date = bson_iter_date_time (&iter);
      } else if (0 == strcmp (key, "md5")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_md5 = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "filename")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_filename = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "contentType")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_content_type = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "aliases")) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter)) {
            GOTO (failure);
         }
         /* zero-copy view into file->bson */
         bson_iter_array (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_aliases, buf, buf_len);
      } else if (0 == strcmp (key, "metadata")) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            GOTO (failure);
         }
         /* zero-copy view into file->bson */
         bson_iter_document (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_metadata, buf, buf_len);
      }
   }

   /* TODO: is there are a minimal object we should be verifying that we
    * actually have here? */

   RETURN (file);

failure:
   /* NOTE(review): only file->bson is destroyed here; file itself is not
    * freed on this path -- confirm whether that leak is intentional. */
   bson_destroy (&file->bson);

   RETURN (NULL);
}