Example #1
/* Construct the aggregate command in cmd:
 * { aggregate: collname, pipeline: [], cursor: { batchSize: x } } */
static void
_make_command (mongoc_change_stream_t *stream, bson_t *command)
{
   bson_iter_t iter;
   bson_t change_stream_stage; /* { $changeStream: <change_stream_doc> } */
   bson_t change_stream_doc;
   bson_t pipeline;
   bson_t cursor_doc;

   bson_init (command);
   bson_append_utf8 (command,
                     "aggregate",
                     9,
                     stream->coll->collection,
                     stream->coll->collectionlen);
   bson_append_array_begin (command, "pipeline", 8, &pipeline);

   /* Append the $changeStream stage */
   bson_append_document_begin (&pipeline, "0", 1, &change_stream_stage);
   bson_append_document_begin (
      &change_stream_stage, "$changeStream", 13, &change_stream_doc);
   bson_concat (&change_stream_doc, &stream->full_document);
   if (!bson_empty (&stream->resume_token)) {
      bson_concat (&change_stream_doc, &stream->resume_token);
   }
   bson_append_document_end (&change_stream_stage, &change_stream_doc);
   bson_append_document_end (&pipeline, &change_stream_stage);

   /* Append user pipeline if it exists */
   if (bson_iter_init_find (&iter, &stream->pipeline_to_append, "pipeline") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      bson_iter_t child_iter;
      uint32_t key_int = 1;
      char buf[16];
      const char *key_str;

      BSON_ASSERT (bson_iter_recurse (&iter, &child_iter));
      while (bson_iter_next (&child_iter)) {
         /* The user pipeline may consist of invalid stages or non-documents.
          * Append anyway, and rely on the server error. */
         size_t keyLen =
            bson_uint32_to_string (key_int, &key_str, buf, sizeof (buf));
         bson_append_value (
            &pipeline, key_str, (int) keyLen, bson_iter_value (&child_iter));
         ++key_int;
      }
   }

   bson_append_array_end (command, &pipeline);

   /* Add batch size if needed */
   bson_append_document_begin (command, "cursor", 6, &cursor_doc);
   if (stream->batch_size > 0) {
      bson_append_int32 (&cursor_doc, "batchSize", 9, stream->batch_size);
   }
   bson_append_document_end (command, &cursor_doc);
}
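For context, this is the command the driver builds on behalf of the public watch API. Below is a minimal usage sketch for a collection-level change stream; it assumes an already-connected mongoc_collection_t and an empty user pipeline, and the printing is purely illustrative.

#include <mongoc.h>
#include <stdio.h>

static void
watch_collection (mongoc_collection_t *coll)
{
   const bson_t *doc;
   const bson_t *err_doc;
   bson_error_t error;
   bson_t *pipeline = bson_new (); /* empty user pipeline */
   mongoc_change_stream_t *stream =
      mongoc_collection_watch (coll, pipeline, NULL /* opts */);

   /* each event is one document from the aggregate cursor created above */
   while (mongoc_change_stream_next (stream, &doc)) {
      char *json = bson_as_json (doc, NULL);
      printf ("change: %s\n", json);
      bson_free (json);
   }

   if (mongoc_change_stream_error_document (stream, &error, &err_doc)) {
      fprintf (stderr, "change stream error: %s\n", error.message);
   }

   mongoc_change_stream_destroy (stream);
   bson_destroy (pipeline);
}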
Example #2
void
_mongoc_write_command_insert_append (mongoc_write_command_t *command,
                                     const bson_t *document)
{
   bson_iter_t iter;
   bson_oid_t oid;
   bson_t tmp;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);
   BSON_ASSERT (document);
   BSON_ASSERT (document->len >= 5);

   /*
    * If the document does not contain an "_id" field, we need to generate
    * a new oid for "_id".
    */
   if (!bson_iter_init_find (&iter, document, "_id")) {
      bson_init (&tmp);
      bson_oid_init (&oid, NULL);
      BSON_APPEND_OID (&tmp, "_id", &oid);
      bson_concat (&tmp, document);
      _mongoc_buffer_append (&command->payload, bson_get_data (&tmp), tmp.len);
      bson_destroy (&tmp);
   } else {
      _mongoc_buffer_append (
         &command->payload, bson_get_data (document), document->len);
   }

   command->n_documents++;

   EXIT;
}
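The "generate an _id when missing" pattern above can be reproduced with public libbson calls alone. A minimal sketch with a hypothetical helper name; the caller owns the returned document.

#include <bson.h>

/* Return a copy of `doc` that is guaranteed to carry an "_id" field. */
static bson_t *
copy_with_id (const bson_t *doc)
{
   bson_iter_t iter;
   bson_t *out = bson_new ();

   if (!bson_iter_init_find (&iter, doc, "_id")) {
      bson_oid_t oid;

      /* prepend a freshly generated ObjectId, then copy the original fields */
      bson_oid_init (&oid, NULL);
      BSON_APPEND_OID (out, "_id", &oid);
   }

   bson_concat (out, doc);
   return out;
}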
Example #3
void
_mongoc_write_command_delete_append (mongoc_write_command_t *command,
                                     const bson_t *selector,
                                     const bson_t *opts)
{
   bson_t document;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_DELETE);
   BSON_ASSERT (selector);

   BSON_ASSERT (selector->len >= 5);

   bson_init (&document);
   BSON_APPEND_DOCUMENT (&document, "q", selector);
   if (opts) {
      bson_concat (&document, opts);
   }

   _mongoc_buffer_append (
      &command->payload, bson_get_data (&document), document.len);
   command->n_documents++;

   bson_destroy (&document);

   EXIT;
}
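Example #4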
bool
mongoc_find_and_modify_opts_append (mongoc_find_and_modify_opts_t *opts,
                                    const bson_t *extra)
{
   BSON_ASSERT (opts);
   BSON_ASSERT (extra);

   return bson_concat (&opts->extra, extra);
}
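A short caller-side sketch of this public API: the extra document is concatenated into the findAndModify command body. The collection handle, field names, and values are illustrative.

#include <mongoc.h>

static bool
find_and_modify_with_max_time (mongoc_collection_t *coll,
                               const bson_t *query,
                               bson_t *reply,
                               bson_error_t *error)
{
   bool ok;
   mongoc_find_and_modify_opts_t *opts = mongoc_find_and_modify_opts_new ();
   bson_t *update = BCON_NEW ("$set", "{", "processed", BCON_BOOL (true), "}");
   bson_t *extra = BCON_NEW ("maxTimeMS", BCON_INT32 (100));

   mongoc_find_and_modify_opts_set_update (opts, update);

   /* arbitrary extra fields end up appended to the command body via
    * bson_concat, exactly as the helper above shows */
   mongoc_find_and_modify_opts_append (opts, extra);

   ok = mongoc_collection_find_and_modify_with_opts (
      coll, query, opts, reply, error);

   bson_destroy (extra);
   bson_destroy (update);
   mongoc_find_and_modify_opts_destroy (opts);
   return ok;
}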
Example #5
static future_t *
find_and_modify (func_ctx_t *ctx, bson_t *cmd)
{
   mongoc_find_and_modify_opts_t *fam;

   BSON_APPEND_UTF8 (cmd, "findAndModify", "collection");
   fam = mongoc_find_and_modify_opts_new ();
   bson_concat (&fam->extra, ctx->opts);

   /* destroy the mongoc_find_and_modify_opts_t later */
   ctx->data = fam;
   ctx->destructor = find_and_modify_cleanup;

   return future_collection_find_and_modify_with_opts (
      ctx->collection, tmp_bson ("{}"), fam, NULL, &ctx->error);
}
Example #6
static void
_mongoc_bulk_operation_update_append (
   mongoc_bulk_operation_t *bulk,
   const bson_t *selector,
   const bson_t *document,
   const mongoc_bulk_update_opts_t *update_opts,
   const bson_t *extra_opts)
{
   mongoc_write_command_t command = {0};
   mongoc_write_command_t *last;
   bson_t opts;
   bool has_collation;

   bson_init (&opts);
   bson_append_bool (&opts, "upsert", 6, update_opts->upsert);
   bson_append_bool (&opts, "multi", 5, update_opts->multi);

   has_collation = !bson_empty (&update_opts->collation);
   if (has_collation) {
      bson_append_document (&opts, "collation", 9, &update_opts->collation);
   }

   if (extra_opts) {
      bson_concat (&opts, extra_opts);
   }

   if (bulk->commands.len) {
      last = &_mongoc_array_index (
         &bulk->commands, mongoc_write_command_t, bulk->commands.len - 1);
      if (last->type == MONGOC_WRITE_COMMAND_UPDATE) {
         last->flags.has_collation |= has_collation;
         last->flags.has_multi_write |= update_opts->multi;
         _mongoc_write_command_update_append (last, selector, document, &opts);
         bson_destroy (&opts);
         return;
      }
   }

   _mongoc_write_command_init_update (
      &command, selector, document, &opts, bulk->flags, bulk->operation_id);

   command.flags.has_collation = has_collation;
   command.flags.has_multi_write = update_opts->multi;

   _mongoc_array_append_val (&bulk->commands, command);
   bson_destroy (&opts);
}
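From the caller's side, extra_opts typically arrives through the public bulk API. A minimal sketch, assuming a connected mongoc_collection_t; the selector, update, and collation values are illustrative.

#include <mongoc.h>

static bool
bulk_update_one_with_collation (mongoc_collection_t *coll, bson_error_t *error)
{
   bool ok;
   bson_t reply;
   mongoc_bulk_operation_t *bulk =
      mongoc_collection_create_bulk_operation_with_opts (coll, NULL);
   bson_t *selector = BCON_NEW ("x", BCON_INT32 (1));
   bson_t *update = BCON_NEW ("$set", "{", "x", BCON_INT32 (2), "}");
   bson_t *opts =
      BCON_NEW ("collation", "{", "locale", BCON_UTF8 ("en_US"), "}");

   /* "collation" travels through extra_opts and is concatenated into the
    * per-update document by the helper above */
   ok = mongoc_bulk_operation_update_one_with_opts (
      bulk, selector, update, opts, error);
   if (ok) {
      ok = (0 != mongoc_bulk_operation_execute (bulk, &reply, error));
      bson_destroy (&reply);
   }

   bson_destroy (opts);
   bson_destroy (update);
   bson_destroy (selector);
   mongoc_bulk_operation_destroy (bulk);
   return ok;
}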
Example #7
void core::concatenate(const bsoncxx::document::view& view) {
    bson_t other;
    bson_init_static(&other, view.data(), view.length());

    if (_impl->is_array()) {
        bson_iter_t iter;
        bson_iter_init(&iter, &other);

        while (bson_iter_next(&iter)) {
            stdx::string_view key = _impl->next_key();

            bson_append_iter(_impl->back(), key.data(), key.length(), &iter);
        }

    } else {
        bson_concat(_impl->back(), &other);
    }
}
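The array branch above re-keys each element so the destination's indexes stay consecutive. The same idea in plain C against libbson, as a minimal sketch; the function name and the caller-tracked start index are assumptions.

#include <bson.h>

/* Append every element of `src` to the array `dst_array`, re-keying the
 * elements as "start_index", "start_index + 1", ... */
static void
concat_into_array (bson_t *dst_array, const bson_t *src, uint32_t start_index)
{
   bson_iter_t iter;
   char buf[16];
   const char *key;

   if (bson_iter_init (&iter, src)) {
      while (bson_iter_next (&iter)) {
         size_t key_len =
            bson_uint32_to_string (start_index++, &key, buf, sizeof buf);
         /* bson_append_iter copies the value currently under the iterator */
         bson_append_iter (dst_array, key, (int) key_len, &iter);
      }
   }
}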
Example #8
void
perl_mongo_sv_to_bson (bson_t * bson, SV *sv, HV *opts) {

  if (!SvROK (sv)) {
    croak ("not a reference");
  }

  if ( ! sv_isobject(sv) ) {
    switch ( SvTYPE(SvRV(sv)) ) {
      case SVt_PVHV:
        hvdoc_to_bson (bson, sv, opts, EMPTY_STACK);
        break;
      case SVt_PVAV:
        avdoc_to_bson(bson, sv, opts, EMPTY_STACK);
        break;
      default:
        sv_dump(sv);
        croak ("type unhandled");
    }
  }
  else {
    SV *obj;
    char *class;

    obj = SvRV(sv);
    class = HvNAME(SvSTASH(obj));

    if ( strEQ(class, "Tie::IxHash") ) {
      ixhashdoc_to_bson(bson, sv, opts, EMPTY_STACK);
    }
    else if ( strEQ(class, "MongoDB::BSON::_EncodedDoc") ) {
        STRLEN str_len;
        SV **svp;
        SV *encoded;
        const char *bson_str;
        bson_t *child;

        encoded = _hv_fetchs_sv((HV *)obj, "bson");
        bson_str = SvPV(encoded, str_len);
        child = bson_new_from_data((uint8_t*) bson_str, str_len);
        bson_concat(bson, child);
        bson_destroy(child);
    }
    else if (SvTYPE(obj) == SVt_PVHV) {
      /* blessed plain hash: encode it like an ordinary hash document
       * (assumed completion; the original example is truncated here) */
      hvdoc_to_bson (bson, sv, opts, EMPTY_STACK);
    }
    else {
      sv_dump (sv);
      croak ("type unhandled");
    }
  }
}
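Example #9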
/**
 * mongoc_write_concern_freeze:
 * @write_concern: A mongoc_write_concern_t.
 *
 * This is an internal function.
 *
 * Freeze the write concern if necessary and encode it into two bson_t
 * documents: the raw BSON form and the getLastError command form.
 *
 * You may not modify the write concern further after calling this function.
 */
static void
_mongoc_write_concern_freeze (mongoc_write_concern_t *write_concern)
{
   bson_t *compiled;
   bson_t *compiled_gle;

   BSON_ASSERT (write_concern);

   compiled = &write_concern->compiled;
   compiled_gle = &write_concern->compiled_gle;

   write_concern->frozen = true;

   bson_init (compiled);
   bson_init (compiled_gle);

   if (write_concern->w == MONGOC_WRITE_CONCERN_W_TAG) {
      BSON_ASSERT (write_concern->wtag);
      BSON_APPEND_UTF8 (compiled, "w", write_concern->wtag);
   } else if (write_concern->w == MONGOC_WRITE_CONCERN_W_MAJORITY) {
      BSON_APPEND_UTF8 (compiled, "w", "majority");
   } else if (write_concern->w == MONGOC_WRITE_CONCERN_W_DEFAULT) {
      /* Do Nothing */
   } else {
      BSON_APPEND_INT32 (compiled, "w", write_concern->w);
   }

   if (write_concern->fsync_ != MONGOC_WRITE_CONCERN_FSYNC_DEFAULT) {
      bson_append_bool(compiled, "fsync", 5, !!write_concern->fsync_);
   }

   if (write_concern->journal != MONGOC_WRITE_CONCERN_JOURNAL_DEFAULT) {
      bson_append_bool(compiled, "j", 1, !!write_concern->journal);
   }

   if (write_concern->wtimeout) {
      bson_append_int32(compiled, "wtimeout", 8, write_concern->wtimeout);
   }

   BSON_APPEND_INT32 (compiled_gle, "getlasterror", 1);
   bson_concat (compiled_gle, compiled);
}
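The final bson_concat above is what turns the compiled write concern into a getLastError command. A standalone sketch of that merge using only public libbson calls; the write-concern values are illustrative.

#include <bson.h>
#include <stdio.h>

int
main (void)
{
   bson_t compiled = BSON_INITIALIZER;
   bson_t gle = BSON_INITIALIZER;
   char *json;

   /* a frozen write concern such as { "w": "majority", "wtimeout": 1000 } */
   BSON_APPEND_UTF8 (&compiled, "w", "majority");
   BSON_APPEND_INT32 (&compiled, "wtimeout", 1000);

   /* getLastError form: { "getlasterror": 1, "w": "majority", "wtimeout": 1000 } */
   BSON_APPEND_INT32 (&gle, "getlasterror", 1);
   bson_concat (&gle, &compiled);

   json = bson_as_json (&gle, NULL);
   printf ("%s\n", json);
   bson_free (json);

   bson_destroy (&compiled);
   bson_destroy (&gle);
   return 0;
}
Example #10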
void
_mongoc_write_command_insert_append (mongoc_write_command_t *command,
                                     const bson_t           *document)
{
   const char *key;
   bson_iter_t iter;
   bson_oid_t oid;
   bson_t tmp;
   char keydata [16];

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);
   BSON_ASSERT (document);
   BSON_ASSERT (document->len >= 5);

   key = NULL;
   bson_uint32_to_string (command->n_documents,
                          &key, keydata, sizeof keydata);

   BSON_ASSERT (key);

   /*
    * If the document does not contain an "_id" field, we need to generate
    * a new oid for "_id".
    */
   if (!bson_iter_init_find (&iter, document, "_id")) {
      bson_init (&tmp);
      bson_oid_init (&oid, NULL);
      BSON_APPEND_OID (&tmp, "_id", &oid);
      bson_concat (&tmp, document);
      BSON_APPEND_DOCUMENT (command->documents, key, &tmp);
      bson_destroy (&tmp);
   } else {
      BSON_APPEND_DOCUMENT (command->documents, key, document);
   }

   command->n_documents++;

   EXIT;
}
Example #11
static void
test_bson_concat (void)
{
   bson_t a = BSON_INITIALIZER;
   bson_t b = BSON_INITIALIZER;
   bson_t c = BSON_INITIALIZER;

   bson_append_int32 (&a, "abc", 3, 1);
   bson_append_int32 (&b, "def", 3, 1);
   bson_concat (&a, &b);

   bson_append_int32 (&c, "abc", 3, 1);
   bson_append_int32 (&c, "def", 3, 1);

   assert (0 == bson_compare (&c, &a));

   bson_destroy (&a);
   bson_destroy (&b);
   bson_destroy (&c);
}
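Note that bson_concat appends the source's fields verbatim; it does not merge or de-duplicate keys. A small sketch illustrating that behaviour:

#include <bson.h>
#include <stdio.h>

int
main (void)
{
   bson_t a = BSON_INITIALIZER;
   bson_t b = BSON_INITIALIZER;
   char *json;

   BSON_APPEND_INT32 (&a, "x", 1);
   BSON_APPEND_INT32 (&b, "x", 2);

   /* both "x" fields are kept: { "x": 1, "x": 2 } */
   bson_concat (&a, &b);

   json = bson_as_json (&a, NULL);
   printf ("%s\n", json);
   bson_free (json);

   bson_destroy (&a);
   bson_destroy (&b);
   return 0;
}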
Example #12
/* add BSON we expect to be included in a command due to an inherited option.
 * e.g., when "count" inherits readConcern from the DB, it should include
 * readConcern: {level: 'database'} in the command body. */
void
add_expected_opt (opt_source_t opt_source, opt_type_t opt_type, bson_t *cmd)
{
   const char *source_name;
   bson_t *opt;

   if (opt_source & OPT_SOURCE_FUNC) {
      source_name = "function";
   } else if (opt_source & OPT_SOURCE_COLL) {
      source_name = "collection";
   } else if (opt_source & OPT_SOURCE_DB) {
      source_name = "database";
   } else if (opt_source & OPT_SOURCE_CLIENT) {
      source_name = "client";
   } else {
      MONGOC_ERROR ("opt_json called with OPT_SOURCE_NONE");
      abort ();
   }

   switch (opt_type) {
   case OPT_READ_CONCERN:
      opt = tmp_bson ("{'readConcern': {'level': '%s'}}", source_name);
      break;
   case OPT_WRITE_CONCERN:
      opt = tmp_bson ("{'writeConcern': {'w': '%s'}}", source_name);
      break;
   case OPT_READ_PREFS:
      opt = tmp_bson (
         "{'$readPreference': {'mode': 'secondary', 'tags': [{'%s': 'yes'}]}}",
         source_name);
      break;
   default:
      abort ();
   }

   bson_concat (cmd, opt);
}
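Outside the test suite (tmp_bson is a test helper), the same "concatenate an option document into a command" step can be done with bson_new_from_json. A minimal sketch; the function name and option values are illustrative.

#include <bson.h>

static bool
add_read_concern (bson_t *cmd, const char *level)
{
   bool ok;
   bson_error_t error;
   bson_t *opt;
   char *json =
      bson_strdup_printf ("{\"readConcern\": {\"level\": \"%s\"}}", level);

   opt = bson_new_from_json ((const uint8_t *) json, -1, &error);
   bson_free (json);
   if (!opt) {
      return false;
   }

   /* append the option's fields onto the end of the command body */
   ok = bson_concat (cmd, opt);
   bson_destroy (opt);
   return ok;
}
Example #13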
/* Update result with the read prefs, following Server Selection Spec.
 * The driver must have discovered the server is a mongos.
 */
static void
_apply_read_preferences_mongos (const mongoc_read_prefs_t *read_prefs,
                                const bson_t *query_bson,
                                mongoc_apply_read_prefs_result_t *result /* OUT */)
{
   mongoc_read_mode_t mode;
   const bson_t *tags = NULL;
   bson_t child;
   const char *mode_str;

   mode = mongoc_read_prefs_get_mode (read_prefs);
   if (read_prefs) {
      tags = mongoc_read_prefs_get_tags (read_prefs);
   }

   /* Server Selection Spec says:
    *
    * For mode 'primary', drivers MUST NOT set the slaveOK wire protocol flag
    *   and MUST NOT use $readPreference
    *
    * For mode 'secondary', drivers MUST set the slaveOK wire protocol flag and
    *   MUST also use $readPreference
    *
    * For mode 'primaryPreferred', drivers MUST set the slaveOK wire protocol
    *   flag and MUST also use $readPreference
    *
    * For mode 'secondaryPreferred', drivers MUST set the slaveOK wire protocol
    *   flag. If the read preference contains a non-empty tag_sets parameter,
    *   drivers MUST use $readPreference; otherwise, drivers MUST NOT use
    *   $readPreference
    *
    * For mode 'nearest', drivers MUST set the slaveOK wire protocol flag and
    *   MUST also use $readPreference
    */
   if (mode == MONGOC_READ_SECONDARY_PREFERRED && bson_empty0 (tags)) {
      result->flags |= MONGOC_QUERY_SLAVE_OK;

   } else if (mode != MONGOC_READ_PRIMARY) {
      result->flags |= MONGOC_QUERY_SLAVE_OK;

      /* Server Selection Spec: "When any $ modifier is used, including the
       * $readPreference modifier, the query MUST be provided using the $query
       * modifier".
       *
       * This applies to commands, too.
       */
      result->query_with_read_prefs = bson_new ();
      result->query_owned = true;

      if (bson_has_field (query_bson, "$query")) {
         bson_concat (result->query_with_read_prefs, query_bson);
      } else {
         bson_append_document (result->query_with_read_prefs,
                               "$query", 6, query_bson);
      }

      bson_append_document_begin (result->query_with_read_prefs,
                                  "$readPreference", 15, &child);
      mode_str = _get_read_mode_string (mode);
      bson_append_utf8 (&child, "mode", 4, mode_str, -1);
      if (!bson_empty0 (tags)) {
         bson_append_array (&child, "tags", 4, tags);
      }

      bson_append_document_end (result->query_with_read_prefs, &child);
   }
}
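A standalone sketch of the $query / $readPreference wrapping described in the comments above, using only public libbson calls; the helper name is hypothetical and tag sets are omitted for brevity.

#include <bson.h>

/* Wrap `query` as { "$query": <query>, "$readPreference": { "mode": <mode> } },
 * the form required when any $ modifier is sent to a mongos. */
static bson_t *
wrap_with_read_prefs (const bson_t *query, const char *mode)
{
   bson_t *wrapped = bson_new ();
   bson_t child;

   if (bson_has_field (query, "$query")) {
      /* already in $query form: keep the caller's modifiers as-is */
      bson_concat (wrapped, query);
   } else {
      bson_append_document (wrapped, "$query", 6, query);
   }

   bson_append_document_begin (wrapped, "$readPreference", 15, &child);
   bson_append_utf8 (&child, "mode", 4, mode, -1);
   bson_append_document_end (wrapped, &child);

   return wrapped;
}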
Example #14
/* construct the aggregate command in cmd. looks like one of the following:
 * for a collection change stream:
 *   { aggregate: collname, pipeline: [], cursor: { batchSize: x } }
 * for a database change stream:
 *   { aggregate: 1, pipeline: [], cursor: { batchSize: x } }
 * for a client change stream:
 *   { aggregate: 1, pipeline: [{$changeStream: {allChangesForCluster: true}}],
 *     cursor: { batchSize: x } }
 */
static void
_make_command (mongoc_change_stream_t *stream, bson_t *command)
{
   bson_iter_t iter;
   bson_t change_stream_stage; /* { $changeStream: <change_stream_doc> } */
   bson_t change_stream_doc;
   bson_t pipeline;
   bson_t cursor_doc;

   bson_init (command);
   if (stream->change_stream_type == MONGOC_CHANGE_STREAM_COLLECTION) {
      bson_append_utf8 (
         command, "aggregate", 9, stream->coll, (int) strlen (stream->coll));
   } else {
      bson_append_int32 (command, "aggregate", 9, 1);
   }

   bson_append_array_begin (command, "pipeline", 8, &pipeline);

   /* append the $changeStream stage. */
   bson_append_document_begin (&pipeline, "0", 1, &change_stream_stage);
   bson_append_document_begin (
      &change_stream_stage, "$changeStream", 13, &change_stream_doc);
   bson_concat (&change_stream_doc, stream->full_document);
   if (!bson_empty (&stream->resume_token)) {
      bson_concat (&change_stream_doc, &stream->resume_token);
   }
   /* Change streams spec: "startAtOperationTime and resumeAfter are mutually
    * exclusive; if both startAtOperationTime and resumeAfter are set, the
    * server will return an error. Drivers MUST NOT throw a custom error, and
    * MUST defer to the server error." */
   if (!_mongoc_timestamp_empty (&stream->operation_time)) {
      _mongoc_timestamp_append (
         &stream->operation_time, &change_stream_doc, "startAtOperationTime");
   }

   if (stream->change_stream_type == MONGOC_CHANGE_STREAM_CLIENT) {
      bson_append_bool (&change_stream_doc, "allChangesForCluster", 20, true);
   }
   bson_append_document_end (&change_stream_stage, &change_stream_doc);
   bson_append_document_end (&pipeline, &change_stream_stage);

   /* Append user pipeline if it exists */
   if (bson_iter_init_find (&iter, &stream->pipeline_to_append, "pipeline") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      bson_iter_t child_iter;
      uint32_t key_int = 1;
      char buf[16];
      const char *key_str;

      BSON_ASSERT (bson_iter_recurse (&iter, &child_iter));
      while (bson_iter_next (&child_iter)) {
         /* the user pipeline may consist of invalid stages or non-documents.
          * append anyway, and rely on the server error. */
         size_t keyLen =
            bson_uint32_to_string (key_int, &key_str, buf, sizeof (buf));
         bson_append_value (
            &pipeline, key_str, (int) keyLen, bson_iter_value (&child_iter));
         ++key_int;
      }
   }

   bson_append_array_end (command, &pipeline);

   /* Add batch size if needed */
   bson_append_document_begin (command, "cursor", 6, &cursor_doc);
   if (stream->batch_size > 0) {
      bson_append_int32 (&cursor_doc, "batchSize", 9, stream->batch_size);
   }
   bson_append_document_end (command, &cursor_doc);
}
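_mongoc_timestamp_append is internal; with public libbson the equivalent startAtOperationTime field can be written with bson_append_timestamp. A minimal sketch with illustrative values; that this matches the internal helper's encoding is an assumption.

#include <bson.h>

/* Add { "startAtOperationTime": Timestamp(t, i) } to a $changeStream document. */
static void
append_start_at_operation_time (bson_t *change_stream_doc,
                                uint32_t timestamp,
                                uint32_t increment)
{
   bson_append_timestamp (change_stream_doc,
                          "startAtOperationTime",
                          20, /* strlen ("startAtOperationTime") */
                          timestamp,
                          increment);
}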