static bool
_mongoc_cursor_array_next (mongoc_cursor_t *cursor,
                           const bson_t   **bson)
{
   bool ret = true;
   mongoc_cursor_array_t *arr;

   ENTRY;

   arr = (mongoc_cursor_array_t *)cursor->iface_data;
   *bson = NULL;

   if (!arr->has_array) {
      ret = _mongoc_cursor_array_prime(cursor);
   }

   if (ret) {
      ret = bson_iter_next (&arr->iter);
   }

   if (ret) {
      bson_iter_document (&arr->iter, &arr->document_len, &arr->document);
      bson_init_static (&arr->bson, arr->document, arr->document_len);

      *bson = &arr->bson;
   }

   RETURN (ret);
}
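Nearly every snippet on this page follows the same two-step pattern: borrow a sub-document's raw bytes with bson_iter_document(), then wrap them in a stack-allocated bson_t with bson_init_static() so no copy is made. The following is a minimal standalone sketch of that pattern, not taken from any example above; it assumes a libbson development setup where the bson header is available.

/* Minimal sketch (assumption: libbson installed; older installs use
 * <bson.h> instead of <bson/bson.h>). Builds { "meta": { "x": 1 } },
 * then borrows and prints the "meta" sub-document without copying it. */
#include <bson/bson.h>
#include <stdio.h>

int
main (void)
{
   bson_t *doc = bson_new ();
   bson_t child;
   bson_t view;
   bson_iter_t iter;
   const uint8_t *data = NULL;
   uint32_t len = 0;

   bson_append_document_begin (doc, "meta", -1, &child);
   bson_append_int32 (&child, "x", -1, 1);
   bson_append_document_end (doc, &child);

   if (bson_iter_init_find (&iter, doc, "meta") &&
       BSON_ITER_HOLDS_DOCUMENT (&iter)) {
      bson_iter_document (&iter, &len, &data);
      if (bson_init_static (&view, data, len)) {
         char *json = bson_as_json (&view, NULL);
         printf ("%s\n", json);
         bson_free (json);
      }
   }

   bson_destroy (doc);
   return 0;
}

Because bson_init_static() only borrows the bytes, the view is valid only while the owning document is alive; the snippets above rely on the same lifetime rule.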
Example #2
static bool hippo_cursor_next(MongoDBDriverCursorData* data)
{
	invalidate_current(data);

	data->next_after_rewind++;

	data->current++;
	if (data->is_command_cursor && bson_iter_next(&data->first_batch_iter)) {
		if (BSON_ITER_HOLDS_DOCUMENT(&data->first_batch_iter)) {
			const uint8_t *document = NULL;
			uint32_t document_len = 0;
			Variant v;

			bson_iter_document(&data->first_batch_iter, &document_len, &document);

			/* Convert the borrowed batch document into the cursor's current
			 * value, mirroring hippo_cursor_rewind() further down this page. */
			BsonToVariantConverter convertor(document, document_len, data->bson_options);
			convertor.convert(&v);
			data->zchild_active = true;
			data->zchild = v;
			return true;
		}
	}
	if (hippo_cursor_load_next(data)) {
		return true;
	} else {
		invalidate_current(data);
	}

	return false;
}
bool
_mongoc_convert_document (mongoc_client_t *client,
                          const bson_iter_t *iter,
                          bson_t *doc,
                          bson_error_t *error)
{
   uint32_t len;
   const uint8_t *data;
   bson_t value;

   if (!BSON_ITER_HOLDS_DOCUMENT (iter)) {
      CONVERSION_ERR ("Invalid field \"%s\" in opts, should contain document,"
                      " not %s",
                      bson_iter_key (iter),
                      _mongoc_bson_type_to_str (bson_iter_type (iter)));
   }

   bson_iter_document (iter, &len, &data);
   if (!bson_init_static (&value, data, len)) {
      BSON_ERR ("Corrupt BSON in field \"%s\" in opts", bson_iter_key (iter));
   }

   bson_destroy (doc);
   bson_copy_to (&value, doc);

   return true;
}
Example #4
int monary_load_size_value(const bson_iter_t* bsonit,
                           monary_column_item* citem,
                           int idx)
{
    bson_type_t type;
    const uint8_t* discard;
    uint32_t size;
    uint32_t* dest;

    type = bson_iter_type(bsonit);
    switch (type) {
        case BSON_TYPE_UTF8:
        case BSON_TYPE_CODE:
            bson_iter_utf8(bsonit, &size);
            break;
        case BSON_TYPE_BINARY:
            bson_iter_binary(bsonit, NULL, &size, &discard);
            break;
        case BSON_TYPE_DOCUMENT:
            bson_iter_document(bsonit, &size, &discard);
            break;
        case BSON_TYPE_ARRAY:
            bson_iter_array(bsonit, &size, &discard);
            break;
        default:
            return 0;
    }
    dest = ((uint32_t*) citem->storage) + idx;
    memcpy(dest, &size, sizeof(uint32_t));
    return 1;
}
Example #5
/* {{{ proto WriteError[] WriteResult::getWriteErrors()
   Returns any write errors that occurred */
PHP_METHOD(WriteResult, getWriteErrors)
{
	bson_iter_t iter, child;
	php_phongo_writeresult_t *intern;
	SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)


	intern = Z_WRITERESULT_OBJ_P(getThis());

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}


	array_init(return_value);

	if (bson_iter_init_find(&iter, intern->reply, "writeErrors") && BSON_ITER_HOLDS_ARRAY(&iter) && bson_iter_recurse(&iter, &child)) {
		while (bson_iter_next(&child)) {
			bson_t cbson;
			uint32_t len;
			const uint8_t *data;
#if PHP_VERSION_ID >= 70000
			zval writeerror;
#else
			zval *writeerror = NULL;
#endif

			if (!BSON_ITER_HOLDS_DOCUMENT(&child)) {
				continue;
			}

			bson_iter_document(&child, &len, &data);

			if (!bson_init_static(&cbson, data, len)) {
				continue;
			}

#if PHP_VERSION_ID >= 70000
			object_init_ex(&writeerror, php_phongo_writeerror_ce);

			if (!phongo_writeerror_init(&writeerror, &cbson TSRMLS_CC)) {
				zval_ptr_dtor(&writeerror);
				continue;
			}

			add_next_index_zval(return_value, &writeerror);
#else
			MAKE_STD_ZVAL(writeerror);
			object_init_ex(writeerror, php_phongo_writeerror_ce);

			if (!phongo_writeerror_init(writeerror, &cbson TSRMLS_CC)) {
				zval_ptr_dtor(&writeerror);
				continue;
			}

			add_next_index_zval(return_value, writeerror);
#endif
		}
	}
}
types::b_document element::get_document() const {
    CITER;

    const std::uint8_t* buf;
    std::uint32_t len;

    bson_iter_document(&iter, &len, &buf);

    return types::b_document{document::view{buf, len}};
}
Example #7
bool
BsonIterSubObject(BSON_ITERATOR *it, BSON *b)
{
	const uint8_t *buffer;
	uint32_t len;

	bson_iter_document(it, &len, &buffer);
	bson_init_static(b, buffer, len);
	return true;
}
Example #8
types::b_document element::get_document() const {
    BSONCXX_TYPE_CHECK(k_document);
    BSONCXX_CITER;

    const std::uint8_t* buf;
    std::uint32_t len;

    bson_iter_document(&iter, &len, &buf);

    return types::b_document{document::view{buf, len}};
}
/**
 * _mongoc_gridfs_file_new_from_bson:
 *
 * creates a gridfs file from a bson object
 *
 * This is only really useful for instantiating a gridfs file from a server
 * side object
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs,
                                   const bson_t    *data)
{
   mongoc_gridfs_file_t *file;
   const char *key;
   bson_iter_t iter;
   const uint8_t *buf;
   uint32_t buf_len;

   ENTRY;

   BSON_ASSERT (gridfs);
   BSON_ASSERT (data);

   file = bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   bson_copy_to (data, &file->bson);

   bson_iter_init (&iter, &file->bson);

   while (bson_iter_next (&iter)) {
      key = bson_iter_key (&iter);

      if (0 == strcmp (key, "_id")) {
         bson_oid_copy (bson_iter_oid (&iter), &file->files_id);
      } else if (0 == strcmp (key, "length")) {
         file->length = bson_iter_int64 (&iter);
      } else if (0 == strcmp (key, "chunkSize")) {
         file->chunk_size = bson_iter_int32 (&iter);
      } else if (0 == strcmp (key, "uploadDate")) {
         file->upload_date = bson_iter_date_time (&iter);
      } else if (0 == strcmp (key, "md5")) {
         file->bson_md5 = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "filename")) {
         file->bson_filename = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "contentType")) {
         file->bson_content_type = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "aliases")) {
         bson_iter_array (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_aliases, buf, buf_len);
      } else if (0 == strcmp (key, "metadata")) {
         bson_iter_document (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_metadata, buf, buf_len);
      }
   }

   /* TODO: is there a minimal object we should be verifying that we
    * actually have here? */

   RETURN (file);
}
Example #10
void
mongoc_apm_command_started_init (mongoc_apm_command_started_t *event,
                                 const bson_t *command,
                                 const char *database_name,
                                 const char *command_name,
                                 int64_t request_id,
                                 int64_t operation_id,
                                 const mongoc_host_list_t *host,
                                 uint32_t server_id,
                                 void *context)
{
   bson_iter_t iter;
   uint32_t len;
   const uint8_t *data;

   /* Command Monitoring Spec:
    *
    * In cases where queries or commands are embedded in a $query parameter
    * when a read preference is provided, they MUST be unwrapped and the value
    * of the $query attribute becomes the filter or the command in the started
    * event. The read preference will subsequently be dropped as it is
    * considered metadata and metadata is not currently provided in the command
    * events.
    */
   if (bson_has_field (command, "$readPreference")) {
      if (bson_iter_init_find (&iter, command, "$query") &&
          BSON_ITER_HOLDS_DOCUMENT (&iter)) {
         bson_iter_document (&iter, &len, &data);
         event->command = bson_new_from_data (data, len);
         event->command_owned = true;
      } else {
         /* Got $readPreference without $query, probably OP_MSG */
         event->command = (bson_t *) command;
         event->command_owned = false;
      }
   } else {
      /* discard "const", we promise not to modify "command" */
      event->command = (bson_t *) command;
      event->command_owned = false;
   }

   event->database_name = database_name;
   event->command_name = command_name;
   event->request_id = request_id;
   event->operation_id = operation_id;
   event->host = host;
   event->server_id = server_id;
   event->context = context;
}
Example #11
static void hippo_cursor_rewind(MongoDBDriverCursorData* data)
{
	if (data->next_after_rewind != 0) {
		if (data->zchild_active) {
			throw MongoDriver::Utils::throwLogicException("Cursors cannot rewind after starting iteration");
		} else { /* If we're not active, assume we have now fully iterated */
			throw MongoDriver::Utils::throwLogicException("Cursors cannot yield multiple iterators");
		}
	}

	invalidate_current(data);
	data->current = 0;
	data->zchild_active = false;

	if (data->first_batch) {
		if (data->is_command_cursor) {
			if (!bson_iter_init(&data->first_batch_iter, data->first_batch)) {
				return;
			}
			if (bson_iter_next(&data->first_batch_iter)) {
				if (BSON_ITER_HOLDS_DOCUMENT(&data->first_batch_iter)) {
					const uint8_t *document = NULL;
					uint32_t document_len = 0;
					Variant v;

					bson_iter_document(&data->first_batch_iter, &document_len, &document);

					BsonToVariantConverter convertor(document, document_len, data->bson_options);
					convertor.convert(&v);
					data->zchild_active = true;
					data->zchild = v;
				}
			}
		} else {
			Variant v;

			BsonToVariantConverter convertor(bson_get_data(data->first_batch), data->first_batch->len, data->bson_options);
			convertor.convert(&v);
			data->zchild_active = true;
			data->zchild = v;
		}
	}
}
Example #12
void bsonToArray(bson_iter_t* iter, Array* output, bool isDocument) {
  bson_t bson;
  const uint8_t *document = NULL;
  uint32_t document_len = 0;

  if (isDocument) {
    bson_iter_document(iter, &document_len, &document);
  } else {
    bson_iter_array(iter, &document_len, &document);
  }

  bson_init_static(&bson, document, document_len);

  Array child = Array::Create();
  bsonToVariant(&bson, &child);

  output->add(
    String(bson_iter_key(iter)),
    child
  );   
}
Example #13
int monary_load_document_value(const bson_iter_t* bsonit,
                               monary_column_item* citem,
                               int idx)
{
    uint32_t document_len;      // The length of document in bytes.
    const uint8_t* document;    // Pointer to the immutable document buffer.
    uint8_t* dest;

    if (BSON_ITER_HOLDS_DOCUMENT(bsonit)) {
        bson_iter_document(bsonit, &document_len, &document);
        if (document_len > citem->type_arg) {
            document_len = citem->type_arg;
        }

        dest = ((uint8_t*) citem->storage) + (idx * document_len);
        memcpy(dest, document, document_len);
        return 1;
    } else {
        return 0;
    }
}
Example #14
static request_t *
_check_op_query (mock_server_t                    *server,
                 test_collection_find_with_opts_t *test_data)
{
   mongoc_query_flags_t flags;
   request_t *request;
   const bson_t *doc;
   bson_iter_t iter;
   uint32_t len;
   const uint8_t *data;
   bson_t query;

   /* Server Selection Spec: all queries to standalone set slaveOk. */
   flags = test_data->expected_flags | MONGOC_QUERY_SLAVE_OK;

   request = mock_server_receives_query (
      server,
      "db.collection",
      flags,
      test_data->expected_skip,
      test_data->expected_n_return,
      test_data->expected_op_query,
      test_data->expected_op_query_projection);

   ASSERT (request);

   /* check that nothing unexpected is in $query */
   if (bson_empty (test_data->filter_bson)) {
      doc = request_get_doc (request, 0);

      if (bson_iter_init_find (&iter, doc, "$query")) {
         ASSERT (BSON_ITER_HOLDS_DOCUMENT (&iter));
         bson_iter_document (&iter, &len, &data);
         bson_init_static (&query, data, (size_t) len);
         ASSERT (bson_empty (&query));
      }
   }

   return request;
}
static void
_mongoc_cursor_cursorid_read_from_batch (mongoc_cursor_t *cursor,
                                         const bson_t   **bson)
{
   mongoc_cursor_cursorid_t *cid;
   const uint8_t *data = NULL;
   uint32_t data_len = 0;

   ENTRY;

   cid = (mongoc_cursor_cursorid_t *)cursor->iface_data;
   BSON_ASSERT (cid);

   if (bson_iter_next (&cid->batch_iter) &&
       BSON_ITER_HOLDS_DOCUMENT (&cid->batch_iter)) {
      bson_iter_document (&cid->batch_iter, &data_len, &data);

      if (bson_init_static (&cid->current_doc, data, data_len)) {
         *bson = &cid->current_doc;
      }
   }
}
Example #16
bool
mongoc_uri_get_mechanism_properties (const mongoc_uri_t *uri, bson_t *properties)
{
   bson_iter_t iter;

   if (!uri) {
      return false;
   }

   if (bson_iter_init_find_case (&iter, &uri->credentials, "mechanismProperties") &&
      BSON_ITER_HOLDS_DOCUMENT (&iter)) {
      uint32_t len = 0;
      const uint8_t *data = NULL;

      bson_iter_document (&iter, &len, &data);
      bson_init_static (properties, data, len);

      return true;
   }

   return false;
}
bool
_mongoc_cursor_array_next (mongoc_cursor_t *cursor,
                           const bson_t   **bson)
{
   bool ret = true;
   mongoc_cursor_array_t *arr;
   bson_iter_t iter;

   ENTRY;

   arr = cursor->iface_data;
   *bson = NULL;

   if (!arr->has_array) {
      arr->has_array = true;

      ret = _mongoc_cursor_next (cursor, &arr->result);

      if (!(ret &&
            bson_iter_init_find (&iter, arr->result, "result") &&
            BSON_ITER_HOLDS_ARRAY (&iter) &&
            bson_iter_recurse (&iter, &arr->iter) &&
            bson_iter_next (&arr->iter))) {
         ret = false;
      }
   } else {
      ret = bson_iter_next (&arr->iter);
   }

   if (ret) {
      bson_iter_document (&arr->iter, &arr->document_len, &arr->document);
      bson_init_static (&arr->bson, arr->document, arr->document_len);

      *bson = &arr->bson;
   }

   RETURN (ret);
}
void
_mongoc_write_command_update_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   mongoc_rpc_t rpc;
   uint32_t request_id = 0;
   bson_iter_t subiter, subsubiter;
   bson_t doc;
   bson_t update, selector;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
                 BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         BSON_ASSERT (bson_init_static (&doc, data, len));

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter)[0] != '$') &&
             !bson_validate (
                &doc, (bson_validate_flags_t) vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            bson_reader_destroy (reader);
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         bson_reader_destroy (reader);
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   bson_reader_destroy (reader);
   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      BSON_ASSERT (bson_iter_init (&subiter, bson));
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.update = data;
            BSON_ASSERT (bson_init_static (&update, data, len));
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.selector = data;
            BSON_ASSERT (bson_init_static (&selector, data, len));
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
         }
      }

      _mongoc_monitor_legacy_write (
         client, command, database, collection, server_stream, request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }
   bson_reader_destroy (reader);
}
Example #19
void
_mongoc_write_command_delete_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   const uint8_t *data;
   mongoc_rpc_t rpc;
   uint32_t request_id;
   bson_iter_t q_iter;
   uint32_t len;
   int64_t limit = 0;
   bson_t *gle = NULL;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   bool r;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_DELETE_FAILED,
                      "Cannot do an empty delete.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      /* the document is like { "q": { <selector> }, limit: <0 or 1> } */
      r = (bson_iter_init (&q_iter, bson) && bson_iter_find (&q_iter, "q") &&
           BSON_ITER_HOLDS_DOCUMENT (&q_iter));

      BSON_ASSERT (r);
      bson_iter_document (&q_iter, &len, &data);
      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);
      if (len > max_bson_obj_size) {
         _mongoc_write_command_too_large_error (
            error, 0, len, max_bson_obj_size, NULL);
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_DELETE;
      rpc.delete_.zero = 0;
      rpc.delete_.collection = ns;

      if (bson_iter_find (&q_iter, "limit") &&
          (BSON_ITER_HOLDS_INT (&q_iter))) {
         limit = bson_iter_as_int64 (&q_iter);
      }

      rpc.delete_.flags =
         limit ? MONGOC_DELETE_SINGLE_REMOVE : MONGOC_DELETE_NONE;
      rpc.delete_.selector = data;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_DELETE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      started = bson_get_monotonic_time ();
   }
   bson_reader_destroy (reader);

   EXIT;
}
Example #20
void
_mongoc_write_result_merge (mongoc_write_result_t *result,   /* IN */
                            mongoc_write_command_t *command, /* IN */
                            const bson_t *reply,             /* IN */
                            uint32_t offset)
{
   int32_t server_index = 0;
   const bson_value_t *value;
   bson_iter_t iter;
   bson_iter_t citer;
   bson_iter_t ar;
   int32_t n_upserted = 0;
   int32_t affected = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      affected = bson_iter_int32 (&iter);
   }

   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &citer) &&
       bson_iter_next (&citer)) {
      result->failed = true;
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      result->nInserted += affected;
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += affected;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:

      /* server returns each upserted _id with its index into this batch
       * look for "upserted": [{"index": 4, "_id": ObjectId()}, ...] */
      if (bson_iter_init_find (&iter, reply, "upserted")) {
         if (BSON_ITER_HOLDS_ARRAY (&iter) &&
             (bson_iter_recurse (&iter, &ar))) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "index") &&
                   BSON_ITER_HOLDS_INT32 (&citer)) {
                  server_index = bson_iter_int32 (&citer);

                  if (bson_iter_recurse (&ar, &citer) &&
                      bson_iter_find (&citer, "_id")) {
                     value = bson_iter_value (&citer);
                     _mongoc_write_result_append_upsert (
                        result, offset + server_index, value);
                     n_upserted++;
                  }
               }
            }
         }
         result->nUpserted += n_upserted;
         /*
          * XXX: The following addition to nMatched needs some checking.
          *      I'm highly skeptical of it.
          */
         result->nMatched += BSON_MAX (0, (affected - n_upserted));
      } else {
         result->nMatched += affected;
      }
      if (bson_iter_init_find (&iter, reply, "nModified") &&
          BSON_ITER_HOLDS_INT32 (&iter)) {
         result->nModified += bson_iter_int32 (&iter);
      }
      break;
   default:
      BSON_ASSERT (false);
      break;
   }

   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      _mongoc_write_result_merge_arrays (
         offset, result, &result->writeErrors, &iter);
   }

   if (bson_iter_init_find (&iter, reply, "writeConcernError") &&
       BSON_ITER_HOLDS_DOCUMENT (&iter)) {
      uint32_t len;
      const uint8_t *data;
      bson_t write_concern_error;
      char str[16];
      const char *key;

      /* writeConcernError is a subdocument in the server response
       * append it to the result->writeConcernErrors array */
      bson_iter_document (&iter, &len, &data);
      bson_init_static (&write_concern_error, data, len);

      bson_uint32_to_string (
         result->n_writeConcernErrors, &key, str, sizeof str);

      if (!bson_append_document (
             &result->writeConcernErrors, key, -1, &write_concern_error)) {
         MONGOC_ERROR ("Error adding \"%s\" to writeConcernErrors.\n", key);
      }

      result->n_writeConcernErrors++;
   }

   /* inefficient if there are ever large numbers: for each label in each err,
    * we linear-search result->errorLabels to see if it's included yet */
   _mongoc_bson_array_copy_labels_to (reply, &result->errorLabels);

   EXIT;
}
void
mongoc_server_description_handle_ismaster (
   mongoc_server_description_t   *sd,
   const bson_t                  *ismaster_response,
   int64_t                        rtt_msec,
   bson_error_t                  *error)
{
   bson_iter_t iter;
   bool is_master = false;
   bool is_shard = false;
   bool is_secondary = false;
   bool is_arbiter = false;
   bool is_replicaset = false;
   bool is_hidden = false;
   const uint8_t *bytes;
   uint32_t len;
   int num_keys = 0;
   ENTRY;

   BSON_ASSERT (sd);

   mongoc_server_description_reset (sd);
   if (!ismaster_response) {
      EXIT;
   }

   bson_destroy (&sd->last_is_master);
   bson_copy_to (ismaster_response, &sd->last_is_master);
   sd->has_is_master = true;

   bson_iter_init (&iter, &sd->last_is_master);

   while (bson_iter_next (&iter)) {
      num_keys++;
      if (strcmp ("ok", bson_iter_key (&iter)) == 0) {
         /* ismaster responses never have ok: 0, but spec requires we check */
         if (! bson_iter_as_bool (&iter)) goto failure;
      } else if (strcmp ("ismaster", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_master = bson_iter_bool (&iter);
      } else if (strcmp ("maxMessageSizeBytes", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_msg_size = bson_iter_int32 (&iter);
      } else if (strcmp ("maxBsonObjectSize", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_bson_obj_size = bson_iter_int32 (&iter);
      } else if (strcmp ("maxWriteBatchSize", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_write_batch_size = bson_iter_int32 (&iter);
      } else if (strcmp ("minWireVersion", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->min_wire_version = bson_iter_int32 (&iter);
      } else if (strcmp ("maxWireVersion", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_wire_version = bson_iter_int32 (&iter);
      } else if (strcmp ("msg", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_UTF8 (&iter)) goto failure;
         is_shard = !!bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("setName", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_UTF8 (&iter)) goto failure;
         sd->set_name = bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("secondary", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_secondary = bson_iter_bool (&iter);
      } else if (strcmp ("hosts", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_ARRAY (&iter)) goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->hosts, bytes, len);
      } else if (strcmp ("passives", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_ARRAY (&iter)) goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->passives, bytes, len);
      } else if (strcmp ("arbiters", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_ARRAY (&iter)) goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->arbiters, bytes, len);
      } else if (strcmp ("primary", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_UTF8 (&iter)) goto failure;
         sd->current_primary = bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("arbiterOnly", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_arbiter = bson_iter_bool (&iter);
      } else if (strcmp ("isreplicaset", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_replicaset = bson_iter_bool (&iter);
      } else if (strcmp ("tags", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_DOCUMENT (&iter)) goto failure;
         bson_iter_document (&iter, &len, &bytes);
         bson_init_static (&sd->tags, bytes, len);
      } else if (strcmp ("hidden", bson_iter_key (&iter)) == 0) {
         is_hidden = bson_iter_bool (&iter);
      }
   }

   if (is_shard) {
      sd->type = MONGOC_SERVER_MONGOS;
   } else if (sd->set_name) {
      if (is_hidden) {
         sd->type = MONGOC_SERVER_RS_OTHER;
      } else if (is_master) {
         sd->type = MONGOC_SERVER_RS_PRIMARY;
      } else if (is_secondary) {
         sd->type = MONGOC_SERVER_RS_SECONDARY;
      } else if (is_arbiter) {
         sd->type = MONGOC_SERVER_RS_ARBITER;
      } else {
         sd->type = MONGOC_SERVER_RS_OTHER;
      }
   } else if (is_replicaset) {
      sd->type = MONGOC_SERVER_RS_GHOST;
   } else if (num_keys > 0) {
      sd->type = MONGOC_SERVER_STANDALONE;
   } else {
      sd->type = MONGOC_SERVER_UNKNOWN;
   }

   mongoc_server_description_update_rtt(sd, rtt_msec);

   EXIT;

failure:
   sd->type = MONGOC_SERVER_UNKNOWN;
   sd->round_trip_time = -1;

   EXIT;
}
/**
 * _mongoc_gridfs_file_new_from_bson:
 *
 * creates a gridfs file from a bson object
 *
 * This is only really useful for instantiating a gridfs file from a server
 * side object
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs,
                                   const bson_t    *data)
{
   mongoc_gridfs_file_t *file;
   const bson_value_t *value;
   const char *key;
   bson_iter_t iter;
   const uint8_t *buf;
   uint32_t buf_len;

   ENTRY;

   BSON_ASSERT (gridfs);
   BSON_ASSERT (data);

   file = (mongoc_gridfs_file_t *)bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   bson_copy_to (data, &file->bson);

   bson_iter_init (&iter, &file->bson);

   while (bson_iter_next (&iter)) {
      key = bson_iter_key (&iter);

      if (0 == strcmp (key, "_id")) {
         value = bson_iter_value (&iter);
         bson_value_copy (value, &file->files_id);
      } else if (0 == strcmp (key, "length")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         file->length = bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "chunkSize")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         if (bson_iter_as_int64 (&iter) > INT32_MAX) {
            GOTO (failure);
         }
         file->chunk_size = (int32_t)bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "uploadDate")) {
         if (!BSON_ITER_HOLDS_DATE_TIME (&iter)){
            GOTO (failure);
         }
         file->upload_date = bson_iter_date_time (&iter);
      } else if (0 == strcmp (key, "md5")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_md5 = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "filename")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_filename = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "contentType")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_content_type = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "aliases")) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter)) {
            GOTO  (failure);
         }
         bson_iter_array (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_aliases, buf, buf_len);
      } else if (0 == strcmp (key, "metadata")) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            GOTO (failure);
         }
         bson_iter_document (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_metadata, buf, buf_len);
      }
   }

   /* TODO: is there a minimal object we should be verifying that we
    * actually have here? */

   RETURN (file);

failure:
   bson_destroy (&file->bson);

   RETURN (NULL);
}
static void
_mongoc_write_command(mongoc_write_command_t       *command,
                      mongoc_client_t              *client,
                      mongoc_server_stream_t       *server_stream,
                      const char                   *database,
                      const char                   *collection,
                      const mongoc_write_concern_t *write_concern,
                      uint32_t                      offset,
                      mongoc_write_result_t        *result,
                      bson_error_t                 *error)
{
   const uint8_t *data;
   bson_iter_t iter;
   const char *key;
   uint32_t len = 0;
   bson_t tmp;
   bson_t ar;
   bson_t cmd;
   bson_t reply;
   char str [16];
   bool has_more;
   bool ret = false;
   uint32_t i;
   int32_t max_bson_obj_size;
   int32_t max_write_batch_size;
   int32_t min_wire_version;
   uint32_t key_len;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_write_batch_size = mongoc_server_stream_max_write_batch_size (server_stream);

   /*
    * If we have an unacknowledged write and the server supports the legacy
    * opcodes, then submit the legacy opcode so we don't need to wait for
    * a response from the server.
    */

   min_wire_version = server_stream->sd->min_wire_version;
   if ((min_wire_version == 0) &&
       !_mongoc_write_concern_needs_gle (write_concern)) {
      if (command->flags.bypass_document_validation != MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
         bson_set_error (error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_COMMAND_INVALID_ARG,
                         "Cannot set bypassDocumentValidation for unacknowledged writes");
         EXIT;
      }
      gLegacyWriteOps[command->type] (command, client, server_stream, database,
                                      collection, write_concern, offset,
                                      result, error);
      EXIT;
   }

   if (!command->n_documents ||
       !bson_iter_init (&iter, command->documents) ||
       !bson_iter_next (&iter)) {
      _empty_error (command, error);
      result->failed = true;
      EXIT;
   }

again:
   bson_init (&cmd);
   has_more = false;
   i = 0;

   BSON_APPEND_UTF8 (&cmd, gCommandNames[command->type], collection);
   BSON_APPEND_DOCUMENT (&cmd, "writeConcern",
                         WRITE_CONCERN_DOC (write_concern));
   BSON_APPEND_BOOL (&cmd, "ordered", command->flags.ordered);
   if (command->flags.bypass_document_validation != MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
      BSON_APPEND_BOOL (&cmd, "bypassDocumentValidation",
                        !!command->flags.bypass_document_validation);
   }

   if (!_mongoc_write_command_will_overflow (0,
                                             command->documents->len,
                                             command->n_documents,
                                             max_bson_obj_size,
                                             max_write_batch_size)) {
      /* copy the whole documents buffer as e.g. "updates": [...] */
      BSON_APPEND_ARRAY (&cmd,
                         gCommandFields[command->type],
                         command->documents);
      i = command->n_documents;
   } else {
      bson_append_array_begin (&cmd, gCommandFields[command->type], -1, &ar);

      do {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            BSON_ASSERT (false);
         }

         bson_iter_document (&iter, &len, &data);
         key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str);

         if (_mongoc_write_command_will_overflow (ar.len,
                                                  key_len + len + 2,
                                                  i,
                                                  max_bson_obj_size,
                                                  max_write_batch_size)) {
            has_more = true;
            break;
         }

         if (!bson_init_static (&tmp, data, len)) {
            BSON_ASSERT (false);
         }

         BSON_APPEND_DOCUMENT (&ar, key, &tmp);

         bson_destroy (&tmp);

         i++;
      } while (bson_iter_next (&iter));

      bson_append_array_end (&cmd, &ar);
   }

   if (!i) {
      too_large_error (error, i, len, max_bson_obj_size, NULL);
      result->failed = true;
      ret = false;
   } else {
      ret = mongoc_cluster_run_command (&client->cluster, server_stream->stream,
                                        MONGOC_QUERY_NONE, database, &cmd,
                                        &reply, error);

      if (!ret) {
         result->failed = true;
      }

      _mongoc_write_result_merge (result, command, &reply, offset);
      offset += i;
      bson_destroy (&reply);
   }

   bson_destroy (&cmd);

   if (has_more && (ret || !command->flags.ordered)) {
      GOTO (again);
   }

   EXIT;
}
static void
_mongoc_write_command_update_legacy (mongoc_write_command_t       *command,
                                     mongoc_client_t              *client,
                                     mongoc_server_stream_t       *server_stream,
                                     const char                   *database,
                                     const char                   *collection,
                                     const mongoc_write_concern_t *write_concern,
                                     uint32_t                      offset,
                                     mongoc_write_result_t        *result,
                                     bson_error_t                 *error)
{
   mongoc_rpc_t rpc;
   bson_iter_t iter, subiter, subsubiter;
   bson_t doc;
   bool has_update, has_selector, is_upsert;
   bson_t update, selector;
   bson_t *gle = NULL;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns [MONGOC_NAMESPACE_MAX + 1];
   int32_t affected = 0;
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL
               | BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   bson_iter_init (&iter, command->documents);
   while (bson_iter_next (&iter)) {
      if (bson_iter_recurse (&iter, &subiter) &&
          bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         bson_init_static (&doc, data, len);

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter) [0] != '$') &&
             !bson_validate (&doc, (bson_validate_flags_t)vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   bson_iter_init (&iter, command->documents);
   while (bson_iter_next (&iter)) {
      rpc.update.msg_len = 0;
      rpc.update.request_id = 0;
      rpc.update.response_to = 0;
      rpc.update.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      has_update = false;
      has_selector = false;
      is_upsert = false;

      bson_iter_recurse (&iter, &subiter);
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            rpc.update.update = data;
            bson_init_static (&update, data, len);
            has_update = true;
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            rpc.update.selector = data;
            bson_init_static (&selector, data, len);
            has_selector = true;
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t)(
                     rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t)(
                     rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
            is_upsert = true;
         }
      }

      if (!mongoc_cluster_sendv_to_server (&client->cluster,
                                           &rpc, 1, server_stream,
                                           write_concern, error)) {
         result->failed = true;
         EXIT;
      }

      if (_mongoc_write_concern_needs_gle (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            EXIT;
         }

         if (bson_iter_init_find (&subiter, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&subiter)) {
            affected = bson_iter_int32 (&subiter);
         }

         /*
          * CDRIVER-372:
          *
          * Versions of MongoDB before 2.6 don't return the _id for an
          * upsert if _id is not an ObjectId.
          */
         if (is_upsert &&
             affected &&
             !bson_iter_init_find (&subiter, gle, "upserted") &&
             bson_iter_init_find (&subiter, gle, "updatedExisting") &&
             BSON_ITER_HOLDS_BOOL (&subiter) &&
             !bson_iter_bool (&subiter)) {
            if (has_update && bson_iter_init_find (&subiter, &update, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            } else if (has_selector &&
                       bson_iter_init_find (&subiter, &selector, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            }
         }

         _mongoc_write_result_merge_legacy (
            result, command, gle,
            MONGOC_ERROR_COLLECTION_UPDATE_FAILED, offset);

         offset++;
         bson_destroy (gle);
      }
   }

   EXIT;
}
static void
_mongoc_write_command_insert_legacy (mongoc_write_command_t       *command,
                                     mongoc_client_t              *client,
                                     mongoc_server_stream_t       *server_stream,
                                     const char                   *database,
                                     const char                   *collection,
                                     const mongoc_write_concern_t *write_concern,
                                     uint32_t                      offset,
                                     mongoc_write_result_t        *result,
                                     bson_error_t                 *error)
{
   uint32_t current_offset;
   mongoc_iovec_t *iov;
   const uint8_t *data;
   mongoc_rpc_t rpc;
   bson_iter_t iter;
   uint32_t len;
   bson_t *gle = NULL;
   uint32_t size = 0;
   bool has_more;
   char ns [MONGOC_NAMESPACE_MAX + 1];
   bool r;
   uint32_t n_docs_in_batch;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   current_offset = offset;

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   r = bson_iter_init (&iter, command->documents);

   if (!r) {
      BSON_ASSERT (false);
      EXIT;
   }

   if (!command->n_documents || !bson_iter_next (&iter)) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *)bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t)(sizeof (mongoc_rpc_header_t) +
                     4 +
                     strlen (database) +
                     1 +
                     strlen (collection) +
                     1);

   do {
      BSON_ASSERT (BSON_ITER_HOLDS_DOCUMENT (&iter));
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx < command->n_documents);

      bson_iter_document (&iter, &len, &data);

      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);

      if (len > max_bson_obj_size) {
         /* document is too large */
         bson_t write_err_doc = BSON_INITIALIZER;

         too_large_error (error, idx, len,
                          max_bson_obj_size, &write_err_doc);

         _mongoc_write_result_merge_legacy (
            result, command, &write_err_doc,
            MONGOC_ERROR_COLLECTION_INSERT_FAILED, offset + idx);

         bson_destroy (&write_err_doc);

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) || size > (max_msg_size - len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) data;
         iov[n_docs_in_batch].iov_len = len;
         size += len;
         n_docs_in_batch++;
      }

      idx++;
   } while (bson_iter_next (&iter));

   if (n_docs_in_batch) {
      rpc.insert.msg_len = 0;
      rpc.insert.request_id = 0;
      rpc.insert.response_to = 0;
      rpc.insert.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags = (
         (command->flags.ordered) ? MONGOC_INSERT_NONE
                          : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      if (!mongoc_cluster_sendv_to_server (&client->cluster,
                                           &rpc, 1, server_stream,
                                           write_concern, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      if (_mongoc_write_concern_needs_gle (write_concern)) {
         bool err = false;
         bson_iter_t citer;

         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            GOTO (cleanup);
         }

         err = (bson_iter_init_find (&citer, gle, "err")
                && bson_iter_as_bool (&citer));

         /*
          * Overwrite the "n" field since it will be zero. Otherwise, our
          * merge_legacy code will not know how many we tried in this batch.
          */
         if (!err &&
             bson_iter_init_find (&citer, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&citer) &&
             !bson_iter_int32 (&citer)) {
            bson_iter_overwrite_int32 (&citer, n_docs_in_batch);
         }
      }
   }

cleanup:

   if (gle) {
      _mongoc_write_result_merge_legacy (
         result, command, gle, MONGOC_ERROR_COLLECTION_INSERT_FAILED,
         current_offset);

      current_offset = offset + idx;
      bson_destroy (gle);
      gle = NULL;
   }

   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
static void
_mongoc_write_command_delete_legacy (mongoc_write_command_t       *command,
                                     mongoc_client_t              *client,
                                     mongoc_server_stream_t       *server_stream,
                                     const char                   *database,
                                     const char                   *collection,
                                     const mongoc_write_concern_t *write_concern,
                                     uint32_t                      offset,
                                     mongoc_write_result_t        *result,
                                     bson_error_t                 *error)
{
   const uint8_t *data;
   mongoc_rpc_t rpc;
   bson_iter_t iter;
   bson_iter_t q_iter;
   uint32_t len;
   bson_t *gle = NULL;
   char ns [MONGOC_NAMESPACE_MAX + 1];
   bool r;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   r = bson_iter_init (&iter, command->documents);

   if (!r) {
      BSON_ASSERT (false);
      EXIT;
   }

   if (!command->n_documents || !bson_iter_next (&iter)) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_DELETE_FAILED,
                      "Cannot do an empty delete.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   do {
      /* the document is like { "q": { <selector> }, limit: <0 or 1> } */
      r = (bson_iter_recurse (&iter, &q_iter) &&
           bson_iter_find (&q_iter, "q") &&
           BSON_ITER_HOLDS_DOCUMENT (&q_iter));

      if (!r) {
         BSON_ASSERT (false);
         EXIT;
      }

      bson_iter_document (&q_iter, &len, &data);
      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);

      rpc.delete_.msg_len = 0;
      rpc.delete_.request_id = 0;
      rpc.delete_.response_to = 0;
      rpc.delete_.opcode = MONGOC_OPCODE_DELETE;
      rpc.delete_.zero = 0;
      rpc.delete_.collection = ns;
      rpc.delete_.flags = command->u.delete_.multi ? MONGOC_DELETE_NONE
                         : MONGOC_DELETE_SINGLE_REMOVE;
      rpc.delete_.selector = data;

      if (!mongoc_cluster_sendv_to_server (&client->cluster,
                                           &rpc, 1, server_stream,
                                           write_concern, error)) {
         result->failed = true;
         EXIT;
      }

      if (_mongoc_write_concern_needs_gle (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            EXIT;
         }

         _mongoc_write_result_merge_legacy (
            result, command, gle,
            MONGOC_ERROR_COLLECTION_DELETE_FAILED, offset);

         offset++;
         bson_destroy (gle);
      }
   } while (bson_iter_next (&iter));

   EXIT;
}
void
_mongoc_write_result_merge (mongoc_write_result_t  *result,  /* IN */
                            mongoc_write_command_t *command, /* IN */
                            const bson_t           *reply,   /* IN */
                            uint32_t                offset)
{
   int32_t server_index = 0;
   const bson_value_t *value;
   bson_iter_t iter;
   bson_iter_t citer;
   bson_iter_t ar;
   int32_t n_upserted = 0;
   int32_t affected = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      affected = bson_iter_int32 (&iter);
   }

   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter) &&
       bson_iter_recurse (&iter, &citer) &&
       bson_iter_next (&citer)) {
      result->failed = true;
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      result->nInserted += affected;
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += affected;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:

      /* server returns each upserted _id with its index into this batch;
       * look for "upserted": [{"index": 4, "_id": ObjectId()}, ...] */
      if (bson_iter_init_find (&iter, reply, "upserted")) {
         if (BSON_ITER_HOLDS_ARRAY (&iter) &&
             (bson_iter_recurse (&iter, &ar))) {

            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "index") &&
                   BSON_ITER_HOLDS_INT32 (&citer)) {
                  server_index = bson_iter_int32 (&citer);

                  if (bson_iter_recurse (&ar, &citer) &&
                      bson_iter_find (&citer, "_id")) {
                     value = bson_iter_value (&citer);
                     _mongoc_write_result_append_upsert (result,
                                                         offset + server_index,
                                                         value);
                     n_upserted++;
                  }
               }
            }
         }
         result->nUpserted += n_upserted;
         /*
          * XXX: The following addition to nMatched needs some checking.
          *      I'm highly skeptical of it.
          */
         result->nMatched += BSON_MAX (0, (affected - n_upserted));
      } else {
         result->nMatched += affected;
      }
      /*
       * SERVER-13001 - in a mixed sharded cluster a call to update could
       * return nModified (>= 2.6) or not (<= 2.4).  If any call does not
       * return nModified we can't report a valid final count so omit the
       * field completely.
       */
      if (bson_iter_init_find (&iter, reply, "nModified") &&
          BSON_ITER_HOLDS_INT32 (&iter)) {
         result->nModified += bson_iter_int32 (&iter);
      } else {
         /*
          * nModified could be BSON_TYPE_NULL, which should also be omitted.
          */
         result->omit_nModified = true;
      }
      break;
   default:
      BSON_ASSERT (false);
      break;
   }

   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      _mongoc_write_result_merge_arrays (offset, result, &result->writeErrors,
                                         &iter);
   }

   if (bson_iter_init_find (&iter, reply, "writeConcernError") &&
       BSON_ITER_HOLDS_DOCUMENT (&iter)) {

      uint32_t len;
      const uint8_t *data;
      bson_t write_concern_error;
      char str[16];
      const char *key;

      /* writeConcernError is a subdocument in the server response
       * append it to the result->writeConcernErrors array */
      bson_iter_document (&iter, &len, &data);
      bson_init_static (&write_concern_error, data, len);

      bson_uint32_to_string (result->n_writeConcernErrors, &key,
                             str, sizeof str);

      bson_append_document (&result->writeConcernErrors,
                            key, -1, &write_concern_error);

      result->n_writeConcernErrors++;
   }

   EXIT;
}
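
The "upserted" branch above depends on the server reporting, for each upserted _id, its index within the batch. A minimal standalone sketch of that recursion over a hand-built reply (the index value and the ObjectId are illustrative only):

#include <bson.h>
#include <stdio.h>

int
main (void)
{
   bson_iter_t iter;
   bson_iter_t ar;
   bson_iter_t citer;
   bson_oid_t oid;
   bson_t *reply;

   bson_oid_init (&oid, NULL);

   /* fake reply: { "n": 1, "upserted": [ { "index": 4, "_id": <oid> } ] } */
   reply = BCON_NEW ("n", BCON_INT32 (1),
                     "upserted", "[",
                        "{", "index", BCON_INT32 (4),
                             "_id", BCON_OID (&oid), "}",
                     "]");

   if (bson_iter_init_find (&iter, reply, "upserted") &&
       BSON_ITER_HOLDS_ARRAY (&iter) &&
       bson_iter_recurse (&iter, &ar)) {
      while (bson_iter_next (&ar)) {
         if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
             bson_iter_recurse (&ar, &citer) &&
             bson_iter_find (&citer, "index") &&
             BSON_ITER_HOLDS_INT32 (&citer)) {
            printf ("upsert at batch index %d\n", bson_iter_int32 (&citer));
         }
      }
   }

   bson_destroy (reply);
   return 0;
}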
Example #28
/* fire a command-succeeded event as if we'd used a modern write command.
 * note: cluster.request_id was incremented once for the write and again
 * for the getLastError, so cluster.request_id is no longer valid; use the
 * passed-in request_id instead.
 */
static void
_mongoc_monitor_legacy_write_succeeded (mongoc_client_t *client,
                                        int64_t duration,
                                        mongoc_write_command_t *command,
                                        const bson_t *gle,
                                        mongoc_server_stream_t *stream,
                                        int64_t request_id)
{
   bson_iter_t iter;
   bson_t doc;
   int64_t ok = 1;
   int64_t n = 0;
   uint32_t code = 8;
   bool wtimeout = false;

   /* server error message */
   const char *errmsg = NULL;
   size_t errmsg_len = 0;

   /* server errInfo subdocument */
   bool has_errinfo = false;
   uint32_t len;
   const uint8_t *data;
   bson_t errinfo;

   /* server upsertedId value */
   bool has_upserted_id = false;
   bson_value_t upserted_id;

   /* server updatedExisting value */
   bool has_updated_existing = false;
   bool updated_existing = false;

   mongoc_apm_command_succeeded_t event;

   ENTRY;

   if (!client->apm_callbacks.succeeded) {
      EXIT;
   }

   /* first extract interesting fields from getlasterror response */
   if (gle) {
      bson_iter_init (&iter, gle);
      while (bson_iter_next (&iter)) {
         if (!strcmp (bson_iter_key (&iter), "ok")) {
            ok = bson_iter_as_int64 (&iter);
         } else if (!strcmp (bson_iter_key (&iter), "n")) {
            n = bson_iter_as_int64 (&iter);
         } else if (!strcmp (bson_iter_key (&iter), "code")) {
            code = (uint32_t) bson_iter_as_int64 (&iter);
            if (code == 0) {
               /* server sent non-numeric error code? */
               code = 8;
            }
         } else if (!strcmp (bson_iter_key (&iter), "upserted")) {
            has_upserted_id = true;
            bson_value_copy (bson_iter_value (&iter), &upserted_id);
         } else if (!strcmp (bson_iter_key (&iter), "updatedExisting")) {
            has_updated_existing = true;
            updated_existing = bson_iter_as_bool (&iter);
         } else if ((!strcmp (bson_iter_key (&iter), "err") ||
                     !strcmp (bson_iter_key (&iter), "errmsg")) &&
                    BSON_ITER_HOLDS_UTF8 (&iter)) {
            errmsg = bson_iter_utf8_unsafe (&iter, &errmsg_len);
         } else if (!strcmp (bson_iter_key (&iter), "errInfo") &&
                    BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            bson_iter_document (&iter, &len, &data);
            bson_init_static (&errinfo, data, len);
            has_errinfo = true;
         } else if (!strcmp (bson_iter_key (&iter), "wtimeout")) {
            wtimeout = true;
         }
      }
   }

   /* based on PyMongo's _convert_write_result() */
   bson_init (&doc);
   bson_append_int32 (&doc, "ok", 2, (int32_t) ok);

   if (errmsg && !wtimeout) {
      /* Failure, but pass to the success callback. Command Monitoring Spec:
       * "Commands that executed on the server and return a status of {ok: 1}
       * are considered successful commands and fire CommandSucceededEvent.
       * Commands that have write errors are included since the actual command
       * did succeed, only writes failed." */
      append_write_err (
         &doc, code, errmsg, errmsg_len, has_errinfo ? &errinfo : NULL);
   } else {
      /* Success, perhaps with a writeConcernError. */
      if (errmsg) {
         append_write_concern_err (&doc, errmsg, errmsg_len);
      }

      if (command->type == MONGOC_WRITE_COMMAND_INSERT) {
         /* GLE result for insert is always 0 in most MongoDB versions. */
         n = command->n_documents;
      } else if (command->type == MONGOC_WRITE_COMMAND_UPDATE) {
         if (has_upserted_id) {
            append_upserted (&doc, &upserted_id);
         } else if (has_updated_existing && !updated_existing && n == 1) {
            bson_t tmp;
            int32_t bson_len = 0;

            memcpy (&bson_len, command->payload.data, 4);
            bson_len = BSON_UINT32_FROM_LE (bson_len);
            bson_init_static (&tmp, command->payload.data, bson_len);
            has_upserted_id = get_upserted_id (&tmp, &upserted_id);

            if (has_upserted_id) {
               append_upserted (&doc, &upserted_id);
            }
         }
      }
   }

   bson_append_int32 (&doc, "n", 1, (int32_t) n);

   mongoc_apm_command_succeeded_init (
      &event,
      duration,
      &doc,
      _mongoc_command_type_to_name (command->type),
      request_id,
      command->operation_id,
      &stream->sd->host,
      stream->sd->id,
      client->apm_context);

   client->apm_callbacks.succeeded (&event);

   mongoc_apm_command_succeeded_cleanup (&event);
   bson_destroy (&doc);

   if (has_upserted_id) {
      bson_value_destroy (&upserted_id);
   }

   EXIT;
}
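
The getlasterror loop above deep-copies the "upserted" value with bson_value_copy so it remains valid after the reply is destroyed. A minimal standalone sketch of that copy/destroy pairing on a hand-built fragment (the ObjectId is illustrative only):

#include <bson.h>
#include <stdio.h>

int
main (void)
{
   bson_iter_t iter;
   bson_value_t upserted_id;
   bson_oid_t oid;
   bson_t *gle;
   char str[25];

   bson_oid_init (&oid, NULL);

   /* fake getLastError fragment: { "upserted": <oid> } */
   gle = BCON_NEW ("upserted", BCON_OID (&oid));

   if (bson_iter_init_find (&iter, gle, "upserted")) {
      /* deep copy: stays valid after the reply is freed */
      bson_value_copy (bson_iter_value (&iter), &upserted_id);
      bson_destroy (gle);

      if (upserted_id.value_type == BSON_TYPE_OID) {
         bson_oid_to_string (&upserted_id.value.v_oid, str);
         printf ("upserted _id: %s\n", str);
      }

      bson_value_destroy (&upserted_id);
   } else {
      bson_destroy (gle);
   }

   return 0;
}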
Example #29
struct transaction* transaction_from_bson(bson_t const* doc)
{
    char key[9];
    bson_iter_t iter;
    bson_iter_t subiter;
    struct transaction* tx = transaction_new();

    if(!bson_iter_init_find(&iter, doc, "version") || !BSON_ITER_HOLDS_INT32(&iter)) goto error;
    transaction_set_version(tx, bson_iter_int32(&iter));

    // Read Inputs
    if(!bson_iter_init_find(&iter, doc, "inputs") || !BSON_ITER_HOLDS_ARRAY(&iter)) goto error;

    uint32_t inputs_doc_length;
    uint8_t const* inputs_doc_data;
    bson_iter_array(&iter, &inputs_doc_length, &inputs_doc_data);

    bson_t inputs_doc;
    bson_init_static(&inputs_doc, inputs_doc_data, inputs_doc_length);

    size_t index = 0;
    for(;;) {
        bson_snprintf(key, sizeof(key), "%u", (unsigned int)index);
        key[sizeof(key) - 1] = '\0';

        // If the array key isn't found, then we reached the end of the array
        if(!bson_iter_init_find(&subiter, &inputs_doc, key)) break;

        // If it's not a document, then there's an error
        if(!BSON_ITER_HOLDS_DOCUMENT(&subiter)) goto error;

        struct transaction_input* input = transaction_input_new();
        struct transaction_output_reference* output_reference = transaction_input_output_reference(input);

        // Load the input document
        bson_t element_doc;
        uint32_t element_doc_length;
        uint8_t const* element_doc_data;
        bson_iter_document(&subiter, &element_doc_length, &element_doc_data);
        bson_init_static(&element_doc, element_doc_data, element_doc_length);

        bson_iter_t elementiter;

        // Output reference
        if(!bson_iter_init_find(&elementiter, &element_doc, "output_reference") || !BSON_ITER_HOLDS_DOCUMENT(&elementiter)) goto error;
        bson_t output_reference_doc;
        uint32_t output_reference_doc_length;
        uint8_t const* output_reference_doc_data;
        bson_iter_document(&elementiter, &output_reference_doc_length, &output_reference_doc_data);
        bson_init_static(&output_reference_doc, output_reference_doc_data, output_reference_doc_length);

        bson_iter_t output_reference_iter;

        uint8_t const* hash;
        uint32_t hash_size;

        if(!bson_iter_init_find(&output_reference_iter, &output_reference_doc, "hash") || !BSON_ITER_HOLDS_BINARY(&output_reference_iter)) goto error;
        bson_iter_binary(&output_reference_iter, BSON_SUBTYPE_BINARY, &hash_size, &hash);
        assert(hash_size == 32);
        transaction_output_reference_set_hash(output_reference, (unsigned char const*)hash);

        if(!bson_iter_init_find(&output_reference_iter, &output_reference_doc, "index") || !BSON_ITER_HOLDS_INT32(&output_reference_iter)) goto error;
        transaction_output_reference_set_index(output_reference, bson_iter_int32(&output_reference_iter));

        // Script
        if(!bson_iter_init_find(&elementiter, &element_doc, "script") || !BSON_ITER_HOLDS_BINARY(&elementiter)) goto error;
        uint32_t script_size;
        uint8_t const* script_data;
        bson_iter_binary(&elementiter, BSON_SUBTYPE_BINARY, &script_size, &script_data);
        struct script* script;
        size_t script_size_result;
        script_size_result = unserialize_script((unsigned char const*)script_data, script_size, &script, script_size);
        assert(script_size_result == script_size);
        transaction_input_set_script(input, script);

        // Sequence
        if(!bson_iter_init_find(&elementiter, &element_doc, "sequence") || !BSON_ITER_HOLDS_INT32(&elementiter)) goto error;
        transaction_input_set_sequence(input, bson_iter_int32(&elementiter));

        transaction_add_input(tx, input);
        index += 1;
    }

    // Read Outputs
    if(!bson_iter_init_find(&iter, doc, "outputs") || !BSON_ITER_HOLDS_ARRAY(&iter)) goto error;

    uint32_t outputs_doc_length;
    uint8_t const* outputs_doc_data;
    bson_iter_array(&iter, &outputs_doc_length, &outputs_doc_data);

    bson_t outputs_doc;
    bson_init_static(&outputs_doc, outputs_doc_data, outputs_doc_length);

    index = 0;
    for(;;) {
        bson_snprintf(key, sizeof(key), "%u", (unsigned int)index);
        key[sizeof(key) - 1] = '\0';

        // If the array key isn't found, then we reached the end of the array
        if(!bson_iter_init_find(&subiter, &outputs_doc, key)) break;

        // If it's not a document, then there's an error
        if(!BSON_ITER_HOLDS_DOCUMENT(&subiter)) goto error;

        struct transaction_output* output = transaction_output_new();

        // Load the output document
        bson_t element_doc;
        uint32_t element_doc_length;
        uint8_t const* element_doc_data;
        bson_iter_document(&subiter, &element_doc_length, &element_doc_data);
        bson_init_static(&element_doc, element_doc_data, element_doc_length);

        bson_iter_t elementiter;

        // Value
        if(!bson_iter_init_find(&elementiter, &element_doc, "value") || !BSON_ITER_HOLDS_INT64(&elementiter)) goto error;
        transaction_output_set_value(output, bson_iter_int64(&elementiter));

        // Script
        if(!bson_iter_init_find(&elementiter, &element_doc, "script") || !BSON_ITER_HOLDS_BINARY(&elementiter)) goto error;
        uint32_t script_size;
        uint8_t const* script_data;
        bson_iter_binary(&elementiter, BSON_SUBTYPE_BINARY, &script_size, &script_data);
        struct script* script;
        size_t script_size_result;
        script_size_result = unserialize_script((unsigned char const*)script_data, script_size, &script, script_size);
        assert(script_size_result == script_size);
        transaction_output_set_script(output, script);

        transaction_add_output(tx, output);
        index += 1;
    }

    if(!bson_iter_init_find(&iter, doc, "lock_time") || !BSON_ITER_HOLDS_INT32(&iter)) goto error;
    transaction_set_lock_time(tx, bson_iter_int32(&iter));

    return tx;
error:
    return NULL;
}
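
For reference, a minimal sketch of a document in the shape transaction_from_bson expects (one input, one output). The 32-byte hash, the script bytes, and the numeric values are dummies, and whether such a document actually round-trips depends on unserialize_script, which is not shown here; the sketch only builds and prints the document.

#include <bson.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
   uint8_t hash[32];
   uint8_t script[1] = { 0x00 };   /* dummy script bytes */
   bson_t *doc;
   char *json;

   memset (hash, 0, sizeof hash);

   doc = BCON_NEW (
      "version", BCON_INT32 (1),
      "inputs", "[",
         "{",
            "output_reference", "{",
               "hash", BCON_BIN (BSON_SUBTYPE_BINARY, hash, (uint32_t) sizeof hash),
               "index", BCON_INT32 (0),
            "}",
            "script", BCON_BIN (BSON_SUBTYPE_BINARY, script, (uint32_t) sizeof script),
            "sequence", BCON_INT32 (-1),   /* 0xFFFFFFFF stored as a signed int32 */
         "}",
      "]",
      "outputs", "[",
         "{",
            "value", BCON_INT64 (5000000000LL),
            "script", BCON_BIN (BSON_SUBTYPE_BINARY, script, (uint32_t) sizeof script),
         "}",
      "]",
      "lock_time", BCON_INT32 (0));

   json = bson_as_json (doc, NULL);
   printf ("%s\n", json);
   bson_free (json);
   bson_destroy (doc);
   return 0;
}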
Example #30
static void
test_bson_iter_fuzz (void)
{
   uint8_t *data;
   uint32_t len = 512;
   uint32_t len_le;
   uint32_t r;
   bson_iter_t iter;
   bson_t *b;
   uint32_t i;
   int pass;

   len_le = BSON_UINT32_TO_LE(len);

   for (pass = 0; pass < FUZZ_N_PASSES; pass++) {
      data = bson_malloc0(len);
      memcpy(data, &len_le, sizeof (len_le));

      for (i = 4; i < len; i += 4) {
         r = rand();
         memcpy(&data[i], &r, sizeof (r));
      }

      if (!(b = bson_new_from_data(data, len))) {
         /*
          * It could fail on buffer length or missing trailing null byte.
          */
         bson_free (data);
         continue;
      }

      BSON_ASSERT(b);

      /*
       * TODO: Most of the following ignores the key. That should be fixed
       *       but has its own perils too.
       */

      assert(bson_iter_init(&iter, b));
      while (bson_iter_next(&iter)) {
         assert(iter.next_off < len);
         switch (bson_iter_type(&iter)) {
         case BSON_TYPE_ARRAY:
         case BSON_TYPE_DOCUMENT:
            {
               const uint8_t *child = NULL;
               uint32_t child_len = 0;

               bson_iter_document(&iter, &child_len, &child);
               if (child_len) {
                  assert(child);
                  assert(child_len >= 5);
                  assert((iter.off + child_len) < b->len);
                  assert(child_len < (uint32_t) -1);
                  memcpy(&child_len, child, sizeof (child_len));
                  child_len = BSON_UINT32_FROM_LE(child_len);
                  assert(child_len >= 5);
               }
            }
            break;
         case BSON_TYPE_DOUBLE:
         case BSON_TYPE_UTF8:
         case BSON_TYPE_BINARY:
         case BSON_TYPE_UNDEFINED:
            break;
         case BSON_TYPE_OID:
            assert(iter.off + 12 < iter.len);
            break;
         case BSON_TYPE_BOOL:
         case BSON_TYPE_DATE_TIME:
         case BSON_TYPE_NULL:
         case BSON_TYPE_REGEX:
            /* TODO: check for 2 valid cstrings. */
         case BSON_TYPE_DBPOINTER:
         case BSON_TYPE_CODE:
         case BSON_TYPE_SYMBOL:
         case BSON_TYPE_CODEWSCOPE:
         case BSON_TYPE_INT32:
         case BSON_TYPE_TIMESTAMP:
         case BSON_TYPE_INT64:
         case BSON_TYPE_DECIMAL128:
         case BSON_TYPE_MAXKEY:
         case BSON_TYPE_MINKEY:
            break;
         case BSON_TYPE_EOD:
         default:
            /* Code should not be reached. */
            assert(false);
            break;
         }
      }

      bson_destroy(b);
      bson_free(data);
   }
}
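
As a sanity reference for the invariants the fuzz loop asserts, here is a minimal sketch over a well-formed document: the length bson_iter_document reports is at least 5 and matches the little-endian int32 stored in the first four embedded bytes.

#include <bson.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

int
main (void)
{
   bson_iter_t iter;
   const uint8_t *child = NULL;
   uint32_t child_len = 0;
   uint32_t embedded_len = 0;

   /* { "sub": { "x": 1 } } */
   bson_t *b = BCON_NEW ("sub", "{", "x", BCON_INT32 (1), "}");

   assert (bson_iter_init_find (&iter, b, "sub"));
   assert (BSON_ITER_HOLDS_DOCUMENT (&iter));

   bson_iter_document (&iter, &child_len, &child);
   assert (child_len >= 5);

   /* the embedded document carries its own length in its first four bytes */
   memcpy (&embedded_len, child, sizeof embedded_len);
   embedded_len = BSON_UINT32_FROM_LE (embedded_len);
   assert (embedded_len == child_len);

   printf ("embedded length: %u\n", (unsigned) embedded_len);

   bson_destroy (b);
   return 0;
}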