Example #1
static void
append_array (bson_t *bson, const char *key, const bson_t *array)
{
   if (array->len) {
      BSON_APPEND_ARRAY (bson, key, array);
   } else {
      bson_t tmp = BSON_INITIALIZER;
      BSON_APPEND_ARRAY (bson, key, &tmp);
      bson_destroy (&tmp);
   }
}
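For context, here is a minimal caller sketch (not part of the original example; the key names and values are invented for illustration, and it assumes it lives in the same file as the static helper above). It builds an array document with the numeric keys libbson expects, then hands it to append_array:

#include <bson.h>
#include <stdio.h>

static void
append_array_demo (void)
{
   bson_t parent = BSON_INITIALIZER;
   bson_t tags = BSON_INITIALIZER;
   char *json;

   /* a BSON array is a document whose keys are "0", "1", ... */
   BSON_APPEND_UTF8 (&tags, "0", "red");
   BSON_APPEND_UTF8 (&tags, "1", "blue");

   append_array (&parent, "tags", &tags);

   json = bson_as_json (&parent, NULL);
   printf ("%s\n", json); /* { "tags" : [ "red", "blue" ] } */
   bson_free (json);

   bson_destroy (&tags);
   bson_destroy (&parent);
}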
Example #2
static void
td_to_bson (const mongoc_topology_description_t *td,
            bson_t                              *bson)
{
   size_t i;
   bson_t servers = BSON_INITIALIZER;
   bson_t server;
   char str[16];
   const char *key;

   for (i = 0; i < td->servers->items_len; i++) {
      bson_uint32_to_string ((uint32_t) i, &key, str, sizeof str);
      sd_to_bson (mongoc_set_get_item (td->servers, (int) i), &server);
      BSON_APPEND_DOCUMENT (&servers, key, &server);
      bson_destroy (&server);
   }

   bson_init (bson);
   BSON_APPEND_UTF8 (bson, "topologyType",
                     mongoc_topology_description_type (td));

   if (td->set_name) {
      BSON_APPEND_UTF8 (bson, "setName", td->set_name);
   }

   BSON_APPEND_ARRAY (bson, "servers", &servers);

   bson_destroy (&servers);
}
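The numeric-key idiom used above (bson_uint32_to_string feeding BSON_APPEND_* calls) can be isolated into a small sketch; the "squares" field and its values are placeholders, not part of the driver:

#include <bson.h>

/* builds { "squares" : [ 0, 1, 4, 9 ] } using generated "0".."3" keys */
static void
build_squares (bson_t *out)
{
   bson_t arr = BSON_INITIALIZER;
   const char *key;
   char buf[16];
   uint32_t i;

   for (i = 0; i < 4; i++) {
      bson_uint32_to_string (i, &key, buf, sizeof buf);
      BSON_APPEND_INT32 (&arr, key, (int32_t) (i * i));
   }

   bson_init (out);
   BSON_APPEND_ARRAY (out, "squares", &arr);
   bson_destroy (&arr);
}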
Example #3
SEXP R_mongo_collection_update(SEXP ptr_col, SEXP ptr_selector, SEXP ptr_update, SEXP ptr_filters, SEXP upsert, SEXP multiple, SEXP replace){
  mongoc_collection_t *col = r2col(ptr_col);
  bson_t *selector = r2bson(ptr_selector);
  bson_t *update = r2bson(ptr_update);

  bool success;
  bson_t opts;
  bson_init (&opts);
  BSON_APPEND_BOOL (&opts, "upsert", Rf_asLogical(upsert));
  if(!Rf_isNull(ptr_filters))
    BSON_APPEND_ARRAY (&opts, "arrayFilters", r2bson(ptr_filters));

  bson_error_t err;
  bson_t reply;
  if(Rf_asLogical(replace)){
    success = mongoc_collection_replace_one(col, selector, update, &opts, &reply, &err);
  } else {
    success = Rf_asLogical(multiple) ?
      mongoc_collection_update_many(col, selector, update, &opts, &reply, &err) :
      mongoc_collection_update_one(col, selector, update, &opts, &reply, &err);
  }

  if(!success)
    stop(err.message);

  return bson2list(&reply);
}
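For readers following the C API rather than the R binding, the opts document assembled above corresponds roughly to the following standalone sketch; the filter key "elem.grade" and the threshold are made-up values:

#include <bson.h>

/* roughly: { "upsert" : true,
 *            "arrayFilters" : [ { "elem.grade" : { "$gte" : 85 } } ] } */
static void
build_update_opts (bson_t *opts)
{
   bson_t filters = BSON_INITIALIZER;
   bson_t filter0;
   bson_t gte;

   /* one filter document under array key "0" */
   BSON_APPEND_DOCUMENT_BEGIN (&filters, "0", &filter0);
   BSON_APPEND_DOCUMENT_BEGIN (&filter0, "elem.grade", &gte);
   BSON_APPEND_INT32 (&gte, "$gte", 85);
   bson_append_document_end (&filter0, &gte);
   bson_append_document_end (&filters, &filter0);

   bson_init (opts);
   BSON_APPEND_BOOL (opts, "upsert", true);
   BSON_APPEND_ARRAY (opts, "arrayFilters", &filters);
   bson_destroy (&filters);
}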
Example #4
bool
_mongoc_write_result_complete (mongoc_write_result_t *result,
                               bson_t                *bson,
                               bson_error_t          *error)
{
   ENTRY;

   BSON_ASSERT (result);

   if (bson) {
      BSON_APPEND_INT32 (bson, "nInserted", result->nInserted);
      BSON_APPEND_INT32 (bson, "nMatched", result->nMatched);
      if (!result->omit_nModified) {
         BSON_APPEND_INT32 (bson, "nModified", result->nModified);
      }
      BSON_APPEND_INT32 (bson, "nRemoved", result->nRemoved);
      BSON_APPEND_INT32 (bson, "nUpserted", result->nUpserted);
      if (!bson_empty0 (&result->upserted)) {
         BSON_APPEND_ARRAY (bson, "upserted", &result->upserted);
      }
      BSON_APPEND_ARRAY (bson, "writeErrors", &result->writeErrors);
      if (result->n_writeConcernErrors) {
         BSON_APPEND_ARRAY (bson, "writeConcernErrors",
                            &result->writeConcernErrors);
      }
   }

   /* set bson_error_t from first write error or write concern error */
   _set_error_from_response (&result->writeErrors,
                             MONGOC_ERROR_COMMAND,
                             "write",
                             &result->error);

   if (!result->error.code) {
      _set_error_from_response (&result->writeConcernErrors,
                                MONGOC_ERROR_WRITE_CONCERN,
                                "write concern",
                                &result->error);
   }

   if (error) {
      memcpy (error, &result->error, sizeof *error);
   }

   RETURN (!result->failed && result->error.code == 0);
}
Example #5
/*---------------------------------------------------------------------------
 *
 * _change_stream_init --
 *
 *       Called after @stream has the collection name, database name, read
 *       preferences, and read concern set. Creates the change stream's
 *       cursor.
 *
 *--------------------------------------------------------------------------
 */
void
_change_stream_init (mongoc_change_stream_t *stream,
                     const bson_t *pipeline,
                     const bson_t *opts)
{
   BSON_ASSERT (pipeline);

   stream->max_await_time_ms = -1;
   stream->batch_size = -1;
   bson_init (&stream->pipeline_to_append);
   bson_init (&stream->resume_token);
   bson_init (&stream->err_doc);

   if (!_mongoc_change_stream_opts_parse (
          stream->client, opts, &stream->opts, &stream->err)) {
      return;
   }

   stream->full_document = BCON_NEW ("fullDocument", stream->opts.fullDocument);

   if (!bson_empty (&(stream->opts.resumeAfter))) {
      bson_append_document (
         &stream->resume_token, "resumeAfter", 11, &(stream->opts.resumeAfter));
   }

   _mongoc_timestamp_set (&stream->operation_time,
                          &(stream->opts.startAtOperationTime));

   stream->batch_size = stream->opts.batchSize;
   stream->max_await_time_ms = stream->opts.maxAwaitTimeMS;

   /* Accept two forms of user pipeline:
    * 1. A document like: { "pipeline": [...] }
    * 2. An array-like document: { "0": {}, "1": {}, ... }
    * If the passed pipeline is invalid, we pass it along and let the server
    * error instead.
    */
   if (!bson_empty (pipeline)) {
      bson_iter_t iter;
      if (bson_iter_init_find (&iter, pipeline, "pipeline") &&
          BSON_ITER_HOLDS_ARRAY (&iter)) {
         if (!BSON_APPEND_VALUE (&stream->pipeline_to_append,
                                 "pipeline",
                                 bson_iter_value (&iter))) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      } else {
         if (!BSON_APPEND_ARRAY (
                &stream->pipeline_to_append, "pipeline", pipeline)) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      }
   }

   if (stream->err.code == 0) {
      (void) _make_cursor (stream);
   }
}
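As an illustration of the two pipeline shapes mentioned in the comment above, both forms can be built with libbson's BCON macros; the $match stage on "operationType" is invented for the sketch:

#include <bson.h>

static void
pipeline_forms_demo (void)
{
   /* form 1: stages wrapped under a "pipeline" key */
   bson_t *wrapped = BCON_NEW (
      "pipeline", "[",
         "{", "$match", "{", "operationType", BCON_UTF8 ("insert"), "}", "}",
      "]");

   /* form 2: an array-like document keyed "0", "1", ... */
   bson_t *array_like = BCON_NEW (
      "0", "{", "$match", "{", "operationType", BCON_UTF8 ("insert"), "}", "}");

   /* either document may be passed as the @pipeline argument */
   bson_destroy (wrapped);
   bson_destroy (array_like);
}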
Example #6
/**
 * mongoc_database_add_user:
 * @database: A #mongoc_database_t.
 * @username: A string containing the username.
 * @password: (allow-none): A string containing the password, or NULL.
 * @roles: (allow-none): An optional bson_t of roles.
 * @custom_data: (allow-none): An optional bson_t of data to store.
 * @error: (out) (allow-none): A location for a bson_error_t or %NULL.
 *
 * Creates a new user with access to @database.
 *
 * Returns: true if the user was added successfully; otherwise false and @error is set.
 * Side effects: None.
 */
bool
mongoc_database_add_user (mongoc_database_t *database,
                          const char        *username,
                          const char        *password,
                          const bson_t      *roles,
                          const bson_t      *custom_data,
                          bson_error_t      *error)
{
   bson_error_t lerror;
   bson_t cmd;
   bson_t ar;
   char *input;
   char *hashed_password;
   bool ret = false;

   ENTRY;

   BSON_ASSERT (database);
   BSON_ASSERT (username);

   /*
    * CDRIVER-232:
    *
    * Perform a (slow and tedious) round trip to mongod to determine if
    * we can safely call createUser. Otherwise, we will fallback and
    * perform legacy insertion into users collection.
    */
   bson_init (&cmd);
   BSON_APPEND_UTF8 (&cmd, "usersInfo", username);
   ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, &lerror);
   bson_destroy (&cmd);

   if (!ret && (lerror.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND)) {
      ret = mongoc_database_add_user_legacy (database, username, password, error);
   } else if (ret || (lerror.code == 13)) {
      /* usersInfo succeeded or failed with auth err, we're on modern mongod */
      input = bson_strdup_printf ("%s:mongo:%s", username, password);
      hashed_password = _mongoc_hex_md5 (input);
      bson_free (input);

      bson_init (&cmd);
      BSON_APPEND_UTF8 (&cmd, "createUser", username);
      BSON_APPEND_UTF8 (&cmd, "pwd", hashed_password);
      BSON_APPEND_BOOL (&cmd, "digestPassword", false);
      if (custom_data) {
         BSON_APPEND_DOCUMENT (&cmd, "customData", custom_data);
      }
      if (roles) {
         BSON_APPEND_ARRAY (&cmd, "roles", roles);
      } else {
         bson_append_array_begin (&cmd, "roles", 5, &ar);
         bson_append_array_end (&cmd, &ar);
      }

      ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, error);

      bson_free (hashed_password);
      bson_destroy (&cmd);
   } else if (error) {
      memcpy (error, &lerror, sizeof *error);
   }

   RETURN (ret);
}
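A possible caller sketch for the function above; the database handle, user name, password, and role values are placeholders chosen for illustration:

#include <mongoc.h>

static bool
add_read_only_user (mongoc_database_t *db, bson_error_t *error)
{
   bson_t roles = BSON_INITIALIZER;
   bson_t role0;
   bool ok;

   /* roles is an array-like document: [ { "role": "read", "db": "mydb" } ] */
   BSON_APPEND_DOCUMENT_BEGIN (&roles, "0", &role0);
   BSON_APPEND_UTF8 (&role0, "role", "read");
   BSON_APPEND_UTF8 (&role0, "db", "mydb");
   bson_append_document_end (&roles, &role0);

   ok = mongoc_database_add_user (db, "alice", "secret", &roles, NULL, error);

   bson_destroy (&roles);
   return ok;
}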
Example #7
/* complete a write result, including only certain fields */
bool
_mongoc_write_result_complete (
   mongoc_write_result_t *result,             /* IN */
   int32_t error_api_version,                 /* IN */
   const mongoc_write_concern_t *wc,          /* IN */
   mongoc_error_domain_t err_domain_override, /* IN */
   bson_t *bson,                              /* OUT */
   bson_error_t *error,                       /* OUT */
   ...)
{
   mongoc_error_domain_t domain;
   va_list args;
   const char *field;
   int n_args;
   bson_iter_t iter;
   bson_iter_t child;

   ENTRY;

   BSON_ASSERT (result);

   if (error_api_version >= MONGOC_ERROR_API_VERSION_2) {
      domain = MONGOC_ERROR_SERVER;
   } else if (err_domain_override) {
      domain = err_domain_override;
   } else if (result->error.domain) {
      domain = (mongoc_error_domain_t) result->error.domain;
   } else {
      domain = MONGOC_ERROR_COLLECTION;
   }

   /* produce either old fields like nModified from the deprecated Bulk API Spec
    * or new fields like modifiedCount from the CRUD Spec, which we partly obey
    */
   if (bson && mongoc_write_concern_is_acknowledged (wc)) {
      n_args = 0;
      va_start (args, error);
      while ((field = va_arg (args, const char *))) {
         n_args++;

         if (!strcmp (field, "nInserted")) {
            BSON_APPEND_INT32 (bson, field, result->nInserted);
         } else if (!strcmp (field, "insertedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nInserted);
         } else if (!strcmp (field, "nMatched")) {
            BSON_APPEND_INT32 (bson, field, result->nMatched);
         } else if (!strcmp (field, "matchedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nMatched);
         } else if (!strcmp (field, "nModified")) {
            BSON_APPEND_INT32 (bson, field, result->nModified);
         } else if (!strcmp (field, "modifiedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nModified);
         } else if (!strcmp (field, "nRemoved")) {
            BSON_APPEND_INT32 (bson, field, result->nRemoved);
         } else if (!strcmp (field, "deletedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nRemoved);
         } else if (!strcmp (field, "nUpserted")) {
            BSON_APPEND_INT32 (bson, field, result->nUpserted);
         } else if (!strcmp (field, "upsertedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nUpserted);
         } else if (!strcmp (field, "upserted") &&
                    !bson_empty0 (&result->upserted)) {
            BSON_APPEND_ARRAY (bson, field, &result->upserted);
         } else if (!strcmp (field, "upsertedId") &&
                    !bson_empty0 (&result->upserted) &&
                    bson_iter_init_find (&iter, &result->upserted, "0") &&
                    bson_iter_recurse (&iter, &child) &&
                    bson_iter_find (&child, "_id")) {
            /* "upsertedId", singular, for update_one() */
            BSON_APPEND_VALUE (bson, "upsertedId", bson_iter_value (&child));
         }
      }

      va_end (args);

      /* default: a standard result includes all Bulk API fields */
      if (!n_args) {
         BSON_APPEND_INT32 (bson, "nInserted", result->nInserted);
         BSON_APPEND_INT32 (bson, "nMatched", result->nMatched);
         BSON_APPEND_INT32 (bson, "nModified", result->nModified);
         BSON_APPEND_INT32 (bson, "nRemoved", result->nRemoved);
         BSON_APPEND_INT32 (bson, "nUpserted", result->nUpserted);
         if (!bson_empty0 (&result->upserted)) {
            BSON_APPEND_ARRAY (bson, "upserted", &result->upserted);
         }
      }

      /* always append errors if there are any */
      if (!n_args || !bson_empty (&result->writeErrors)) {
         BSON_APPEND_ARRAY (bson, "writeErrors", &result->writeErrors);
      }

      if (result->n_writeConcernErrors) {
         BSON_APPEND_ARRAY (
            bson, "writeConcernErrors", &result->writeConcernErrors);
      }
   }

   /* set bson_error_t from first write error or write concern error */
   _set_error_from_response (
      &result->writeErrors, domain, "write", &result->error);

   if (!result->error.code) {
      _set_error_from_response (&result->writeConcernErrors,
                                MONGOC_ERROR_WRITE_CONCERN,
                                "write concern",
                                &result->error);
   }

   if (error) {
      memcpy (error, &result->error, sizeof *error);
   }

   RETURN (!result->failed && result->error.code == 0);
}
Example #8
static void
_mongoc_write_command(mongoc_write_command_t       *command,
                      mongoc_client_t              *client,
                      mongoc_server_stream_t       *server_stream,
                      const char                   *database,
                      const char                   *collection,
                      const mongoc_write_concern_t *write_concern,
                      uint32_t                      offset,
                      mongoc_write_result_t        *result,
                      bson_error_t                 *error)
{
   const uint8_t *data;
   bson_iter_t iter;
   const char *key;
   uint32_t len = 0;
   bson_t tmp;
   bson_t ar;
   bson_t cmd;
   bson_t reply;
   char str [16];
   bool has_more;
   bool ret = false;
   uint32_t i;
   int32_t max_bson_obj_size;
   int32_t max_write_batch_size;
   int32_t min_wire_version;
   uint32_t key_len;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_write_batch_size = mongoc_server_stream_max_write_batch_size (server_stream);

   /*
    * If we have an unacknowledged write and the server supports the legacy
    * opcodes, then submit the legacy opcode so we don't need to wait for
    * a response from the server.
    */

   min_wire_version = server_stream->sd->min_wire_version;
   if ((min_wire_version == 0) &&
       !_mongoc_write_concern_needs_gle (write_concern)) {
      if (command->flags.bypass_document_validation != MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
         bson_set_error (error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_COMMAND_INVALID_ARG,
                         "Cannot set bypassDocumentValidation for unacknowledged writes");
         EXIT;
      }
      gLegacyWriteOps[command->type] (command, client, server_stream, database,
                                      collection, write_concern, offset,
                                      result, error);
      EXIT;
   }

   if (!command->n_documents ||
       !bson_iter_init (&iter, command->documents) ||
       !bson_iter_next (&iter)) {
      _empty_error (command, error);
      result->failed = true;
      EXIT;
   }

again:
   bson_init (&cmd);
   has_more = false;
   i = 0;

   BSON_APPEND_UTF8 (&cmd, gCommandNames[command->type], collection);
   BSON_APPEND_DOCUMENT (&cmd, "writeConcern",
                         WRITE_CONCERN_DOC (write_concern));
   BSON_APPEND_BOOL (&cmd, "ordered", command->flags.ordered);
   if (command->flags.bypass_document_validation != MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
      BSON_APPEND_BOOL (&cmd, "bypassDocumentValidation",
                        !!command->flags.bypass_document_validation);
   }

   if (!_mongoc_write_command_will_overflow (0,
                                             command->documents->len,
                                             command->n_documents,
                                             max_bson_obj_size,
                                             max_write_batch_size)) {
      /* copy the whole documents buffer as e.g. "updates": [...] */
      BSON_APPEND_ARRAY (&cmd,
                         gCommandFields[command->type],
                         command->documents);
      i = command->n_documents;
   } else {
      bson_append_array_begin (&cmd, gCommandFields[command->type], -1, &ar);

      do {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            BSON_ASSERT (false);
         }

         bson_iter_document (&iter, &len, &data);
         key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str);

         if (_mongoc_write_command_will_overflow (ar.len,
                                                  key_len + len + 2,
                                                  i,
                                                  max_bson_obj_size,
                                                  max_write_batch_size)) {
            has_more = true;
            break;
         }

         if (!bson_init_static (&tmp, data, len)) {
            BSON_ASSERT (false);
         }

         BSON_APPEND_DOCUMENT (&ar, key, &tmp);

         bson_destroy (&tmp);

         i++;
      } while (bson_iter_next (&iter));

      bson_append_array_end (&cmd, &ar);
   }

   if (!i) {
      too_large_error (error, i, len, max_bson_obj_size, NULL);
      result->failed = true;
      ret = false;
   } else {
      ret = mongoc_cluster_run_command (&client->cluster, server_stream->stream,
                                        MONGOC_QUERY_NONE, database, &cmd,
                                        &reply, error);

      if (!ret) {
         result->failed = true;
      }

      _mongoc_write_result_merge (result, command, &reply, offset);
      offset += i;
      bson_destroy (&reply);
   }

   bson_destroy (&cmd);

   if (has_more && (ret || !command->flags.ordered)) {
      GOTO (again);
   }

   EXIT;
}
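The batching idiom in the middle of the function (bson_append_array_begin, generated numeric keys, a size check, bson_append_array_end) can be reduced to the following sketch; the "documents" field name and the size accounting are simplifications for illustration, not the driver's exact rules:

#include <bson.h>
#include <string.h>

/* append documents under generated keys until a size budget is hit;
 * returns how many were appended so the caller can batch the rest */
static uint32_t
append_batch (bson_t *cmd, const bson_t **docs, uint32_t n_docs, uint32_t max_len)
{
   bson_t ar;
   const char *key;
   char str[16];
   uint32_t i;

   bson_append_array_begin (cmd, "documents", -1, &ar);

   for (i = 0; i < n_docs; i++) {
      bson_uint32_to_string (i, &key, str, sizeof str);
      if (ar.len + docs[i]->len + strlen (key) + 2 > max_len) {
         break; /* caller sends this batch, then continues from docs[i] */
      }
      BSON_APPEND_DOCUMENT (&ar, key, docs[i]);
   }

   bson_append_array_end (cmd, &ar);
   return i;
}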
Example #9
mongoc_change_stream_t *
_mongoc_change_stream_new (const mongoc_collection_t *coll,
                           const bson_t *pipeline,
                           const bson_t *opts)
{
   bool full_doc_set = false;
   mongoc_change_stream_t *stream =
      (mongoc_change_stream_t *) bson_malloc0 (sizeof (mongoc_change_stream_t));

   BSON_ASSERT (coll);
   BSON_ASSERT (pipeline);

   stream->max_await_time_ms = -1;
   stream->batch_size = -1;
   stream->coll = mongoc_collection_copy ((mongoc_collection_t *) coll);
   bson_init (&stream->pipeline_to_append);
   bson_init (&stream->full_document);
   bson_init (&stream->opts);
   bson_init (&stream->resume_token);
   bson_init (&stream->err_doc);

   /*
    * The passed options may consist of:
    * fullDocument: 'default'|'updateLookup', passed to $changeStream stage
    * resumeAfter: optional<Doc>, passed to $changeStream stage
    * maxAwaitTimeMS: Optional<Int64>, set on the cursor
    * batchSize: Optional<Int32>, passed as agg option, {cursor: { batchSize: }}
    * standard command options like "sessionId", "maxTimeMS", or "collation"
    */

   if (opts) {
      bson_iter_t iter;

      if (bson_iter_init_find (&iter, opts, "fullDocument")) {
         SET_BSON_OR_ERR (&stream->full_document, "fullDocument");
         full_doc_set = true;
      }

      if (bson_iter_init_find (&iter, opts, "resumeAfter")) {
         SET_BSON_OR_ERR (&stream->resume_token, "resumeAfter");
      }

      if (bson_iter_init_find (&iter, opts, "batchSize")) {
         if (BSON_ITER_HOLDS_INT32 (&iter)) {
            stream->batch_size = bson_iter_int32 (&iter);
         }
      }

      if (bson_iter_init_find (&iter, opts, "maxAwaitTimeMS") &&
          BSON_ITER_HOLDS_INT (&iter)) {
         stream->max_await_time_ms = bson_iter_as_int64 (&iter);
      }

      /* save the remaining opts for mongoc_collection_read_command_with_opts */
      bson_copy_to_excluding_noinit (opts,
                                     &stream->opts,
                                     "fullDocument",
                                     "resumeAfter",
                                     "batchSize",
                                     "maxAwaitTimeMS",
                                     NULL);
   }

   if (!full_doc_set) {
      if (!BSON_APPEND_UTF8 (
             &stream->full_document, "fullDocument", "default")) {
         CHANGE_STREAM_ERR ("fullDocument");
      }
   }

   /* Accept two forms of user pipeline:
    * 1. A document like: { "pipeline": [...] }
    * 2. An array-like document: { "0": {}, "1": {}, ... }
    * If the passed pipeline is invalid, we pass it along and let the server
    * error instead.
    */
   if (!bson_empty (pipeline)) {
      bson_iter_t iter;
      if (bson_iter_init_find (&iter, pipeline, "pipeline") &&
          BSON_ITER_HOLDS_ARRAY (&iter)) {
         SET_BSON_OR_ERR (&stream->pipeline_to_append, "pipeline");
      } else {
         if (!BSON_APPEND_ARRAY (
                &stream->pipeline_to_append, "pipeline", pipeline)) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      }
   }

   if (stream->err.code == 0) {
      (void) _make_cursor (stream);
   }

   return stream;
}
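For comparison, application code would normally not call _mongoc_change_stream_new directly. Assuming a libmongoc version that exposes mongoc_collection_watch, a usage sketch with placeholder pipeline and option values might look like this:

#include <mongoc.h>

static mongoc_change_stream_t *
watch_inserts (mongoc_collection_t *coll)
{
   /* array-like pipeline: one invented $match stage */
   bson_t *pipeline = BCON_NEW (
      "0", "{", "$match", "{", "operationType", BCON_UTF8 ("insert"), "}", "}");
   bson_t *opts = BCON_NEW ("fullDocument", BCON_UTF8 ("updateLookup"),
                            "batchSize", BCON_INT32 (10),
                            "maxAwaitTimeMS", BCON_INT64 (5000));
   mongoc_change_stream_t *stream =
      mongoc_collection_watch (coll, pipeline, opts);

   bson_destroy (pipeline);
   bson_destroy (opts);
   return stream;
}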