void
mongoc_bulk_operation_insert (mongoc_bulk_operation_t *bulk,
                              const bson_t            *document)
{
   mongoc_write_command_t command = { 0 };
   mongoc_write_command_t *last;

   ENTRY;

   BSON_ASSERT (bulk);
   BSON_ASSERT (document);

   if (bulk->commands.len) {
      last = &_mongoc_array_index (&bulk->commands,
                                   mongoc_write_command_t,
                                   bulk->commands.len - 1);

      if (last->type == MONGOC_WRITE_COMMAND_INSERT) {
         _mongoc_write_command_insert_append (last, document);
         EXIT;
      }
   }

   _mongoc_write_command_init_insert (
      &command, document, bulk->flags, bulk->operation_id,
      !mongoc_write_concern_is_acknowledged (bulk->write_concern));

   _mongoc_array_append_val (&bulk->commands, command);

   EXIT;
}
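
The function above is the driver's internal append path: when the last queued command is already an insert, the new document is appended to it instead of starting a new command. The following is a minimal usage sketch, not taken from the driver's documentation, showing how the public bulk API reaches this path; the URI, database and collection names are placeholders.

/* Hypothetical usage sketch (placeholder URI and namespace); the calls below
 * are the public libmongoc bulk API, which funnels into
 * mongoc_bulk_operation_insert above. */
#include <stdio.h>
#include <mongoc/mongoc.h> /* <mongoc.h> on older installs */

int
main (void)
{
   mongoc_client_t *client;
   mongoc_collection_t *collection;
   mongoc_bulk_operation_t *bulk;
   bson_t *doc;
   bson_t reply;
   bson_error_t error;
   uint32_t server_id;
   int i;

   mongoc_init ();
   client = mongoc_client_new ("mongodb://localhost:27017");
   collection = mongoc_client_get_collection (client, "db", "coll");
   bulk = mongoc_collection_create_bulk_operation_with_opts (collection, NULL);

   for (i = 0; i < 3; i++) {
      doc = BCON_NEW ("i", BCON_INT32 (i));
      /* consecutive inserts are appended to one MONGOC_WRITE_COMMAND_INSERT */
      mongoc_bulk_operation_insert (bulk, doc);
      bson_destroy (doc);
   }

   /* returns the id of the server used, or 0 on failure */
   server_id = mongoc_bulk_operation_execute (bulk, &reply, &error);
   if (!server_id) {
      fprintf (stderr, "bulk insert failed: %s\n", error.message);
   }

   bson_destroy (&reply);
   mongoc_bulk_operation_destroy (bulk);
   mongoc_collection_destroy (collection);
   mongoc_client_destroy (client);
   mongoc_cleanup ();
   return 0;
}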
Example #2
bool
mongoc_bulk_operation_insert_with_opts (mongoc_bulk_operation_t *bulk,
                                        const bson_t *document,
                                        const bson_t *opts,
                                        bson_error_t *error)
{
   mongoc_bulk_insert_opts_t insert_opts;
   mongoc_write_command_t command = {0};
   mongoc_write_command_t *last;
   bool ret = false;

   ENTRY;

   BSON_ASSERT (bulk);
   BSON_ASSERT (document);

   BULK_RETURN_IF_PRIOR_ERROR;

   if (!_mongoc_bulk_insert_opts_parse (
          bulk->client, opts, &insert_opts, error)) {
      GOTO (done);
   }

   if (!_mongoc_validate_new_document (document, insert_opts.validate, error)) {
      GOTO (done);
   }

   if (bulk->commands.len) {
      last = &_mongoc_array_index (
         &bulk->commands, mongoc_write_command_t, bulk->commands.len - 1);

      if (last->type == MONGOC_WRITE_COMMAND_INSERT) {
         _mongoc_write_command_insert_append (last, document);
         ret = true;
         GOTO (done);
      }
   }

   _mongoc_write_command_init_insert (
      &command,
      document,
      opts,
      bulk->flags,
      bulk->operation_id,
      !mongoc_write_concern_is_acknowledged (bulk->write_concern));

   _mongoc_array_append_val (&bulk->commands, command);

   ret = true;

done:
   _mongoc_bulk_insert_opts_cleanup (&insert_opts);
   RETURN (ret);
}
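
The *_with_opts variant parses options, validates the new document, and reports failure through bson_error_t instead of queueing silently. A small error-checked sketch follows; it assumes a bulk handle created as in the previous sketch, and the "validate" option is the one documented for this function (set to false here to skip client-side validation).

/* Sketch only: "bulk" is assumed to come from the previous sketch. */
bson_t *doc = BCON_NEW ("_id", BCON_INT32 (1), "x", BCON_UTF8 ("y"));
bson_t *opts = BCON_NEW ("validate", BCON_BOOL (false)); /* skip client-side validation */
bson_error_t error;

if (!mongoc_bulk_operation_insert_with_opts (bulk, doc, opts, &error)) {
   fprintf (stderr, "could not queue insert: %s\n", error.message);
}

bson_destroy (opts);
bson_destroy (doc);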
Example #3
/* {{{ proto boolean WriteResult::isAcknowledged()
   Returns whether the write operation was acknowledged (based on the write
   concern). */
PHP_METHOD(WriteResult, isAcknowledged)
{
	php_phongo_writeresult_t *intern;
	SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

	intern = Z_WRITERESULT_OBJ_P(getThis());

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_BOOL(mongoc_write_concern_is_acknowledged(intern->write_concern));
}
Example #4
static void
test_write_concern_fsync_and_journal_gle_and_validity (void)
{
   mongoc_write_concern_t *write_concern = mongoc_write_concern_new();

   /*
    * Journal and fsync should imply GLE regardless of w; however, journal and
    * fsync logically conflict with w=0 and w=-1, so a write concern with such
    * a combination of options will be considered invalid.
    */

   /* A NULL write concern is treated as acknowledged (needs GLE), but is not valid */
   ASSERT(mongoc_write_concern_is_acknowledged (NULL));
   ASSERT(!mongoc_write_concern_is_valid (NULL));

   /* Default write concern needs GLE and is valid */
   ASSERT(write_concern);
   ASSERT(mongoc_write_concern_is_acknowledged (write_concern));
   ASSERT(mongoc_write_concern_is_valid (write_concern));
   ASSERT(!mongoc_write_concern_journal_is_set (write_concern));

   /* w=0 does not need GLE and is valid */
   mongoc_write_concern_set_w(write_concern, MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED);
   ASSERT(!mongoc_write_concern_is_acknowledged (write_concern));
   ASSERT(mongoc_write_concern_is_valid (write_concern));
   ASSERT(!mongoc_write_concern_journal_is_set (write_concern));

   /* fsync=true needs GLE, but it conflicts with w=0 */
   mongoc_write_concern_set_fsync(write_concern, true);
   ASSERT(mongoc_write_concern_is_acknowledged (write_concern));
   ASSERT(!mongoc_write_concern_is_valid (write_concern));
   ASSERT(!mongoc_write_concern_journal_is_set (write_concern));
   mongoc_write_concern_set_fsync(write_concern, false);

   /* journal=true needs GLE, but it conflicts with w=0 */
   mongoc_write_concern_set_journal(write_concern, true);
   ASSERT(mongoc_write_concern_is_acknowledged (write_concern));
   ASSERT(!mongoc_write_concern_is_valid (write_concern));
   ASSERT(mongoc_write_concern_journal_is_set (write_concern));
   mongoc_write_concern_set_journal(write_concern, false);

   /* w=-1 does not need GLE and is valid */
   mongoc_write_concern_set_w(write_concern, MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED);
   ASSERT(!mongoc_write_concern_is_acknowledged (write_concern));
   ASSERT(mongoc_write_concern_is_valid (write_concern));
   ASSERT(mongoc_write_concern_journal_is_set (write_concern));

   /* fsync=true needs GLE, but it conflicts with w=-1 */
   mongoc_write_concern_set_fsync(write_concern, true);
   ASSERT(mongoc_write_concern_is_acknowledged (write_concern));
   ASSERT(!mongoc_write_concern_is_valid (write_concern));
   ASSERT(mongoc_write_concern_journal_is_set (write_concern));

   /* journal=true needs GLE, but it conflicts with w=-1 */
   mongoc_write_concern_set_fsync(write_concern, false);
   mongoc_write_concern_set_journal(write_concern, true);
   ASSERT(mongoc_write_concern_is_acknowledged (write_concern));
   ASSERT(mongoc_write_concern_journal_is_set (write_concern));

   /* fsync=true with w=default needs GLE and is valid */
   mongoc_write_concern_set_journal(write_concern, false);
   mongoc_write_concern_set_fsync(write_concern, true);
   mongoc_write_concern_set_w(write_concern, MONGOC_WRITE_CONCERN_W_DEFAULT);
   ASSERT(mongoc_write_concern_is_acknowledged (write_concern));
   ASSERT(mongoc_write_concern_is_valid (write_concern));
   ASSERT(mongoc_write_concern_journal_is_set (write_concern));

   /* journal=true with w=default needs GLE and is valid */
   mongoc_write_concern_set_fsync(write_concern, false);
   mongoc_write_concern_set_journal(write_concern, true);
   mongoc_write_concern_set_w(write_concern, MONGOC_WRITE_CONCERN_W_DEFAULT);
   ASSERT(mongoc_write_concern_is_acknowledged (write_concern));
   ASSERT(mongoc_write_concern_is_valid (write_concern));
   ASSERT(mongoc_write_concern_journal_is_set (write_concern));

   mongoc_write_concern_destroy(write_concern);
}
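
The test above draws the line between "acknowledged" and "valid": journal or fsync force acknowledgement, but combined with w=0 or w=-1 the concern becomes invalid. As a hedged illustration, application code typically builds an unacknowledged concern like this (the collection handle is assumed from the first sketch):

/* Fragment: build a w=0 (unacknowledged) write concern and attach it to a
 * collection handle assumed from the first sketch. */
mongoc_write_concern_t *wc = mongoc_write_concern_new ();

mongoc_write_concern_set_w (wc, MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED);

/* w=0 is fire-and-forget: not acknowledged, yet still a valid concern
 * as long as journal/fsync are not also requested. */
if (!mongoc_write_concern_is_acknowledged (wc) &&
    mongoc_write_concern_is_valid (wc)) {
   /* the collection copies the concern; it applies to later writes */
   mongoc_collection_set_write_concern (collection, wc);
}

mongoc_write_concern_destroy (wc);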
Example #5
void
_mongoc_write_command_execute_idl (mongoc_write_command_t *command,
                                   mongoc_client_t *client,
                                   mongoc_server_stream_t *server_stream,
                                   const char *database,
                                   const char *collection,
                                   uint32_t offset,
                                   const mongoc_crud_opts_t *crud,
                                   mongoc_write_result_t *result)
{
   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (database);
   BSON_ASSERT (collection);
   BSON_ASSERT (result);

   if (command->flags.has_collation) {
      if (!mongoc_write_concern_is_acknowledged (crud->writeConcern)) {
         result->failed = true;
         bson_set_error (&result->error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_COMMAND_INVALID_ARG,
                         "Cannot set collation for unacknowledged writes");
         EXIT;
      }

      if (server_stream->sd->max_wire_version < WIRE_VERSION_COLLATION) {
         bson_set_error (&result->error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
                         "The selected server does not support collation");
         result->failed = true;
         EXIT;
      }
   }

   if (command->flags.has_array_filters) {
      if (!mongoc_write_concern_is_acknowledged (crud->writeConcern)) {
         result->failed = true;
         bson_set_error (&result->error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_COMMAND_INVALID_ARG,
                         "Cannot use array filters with unacknowledged writes");
         EXIT;
      }

      if (server_stream->sd->max_wire_version < WIRE_VERSION_ARRAY_FILTERS) {
         bson_set_error (&result->error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
                         "The selected server does not support array filters");
         result->failed = true;
         EXIT;
      }
   }

   if (command->flags.bypass_document_validation !=
       MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
      if (!mongoc_write_concern_is_acknowledged (crud->writeConcern)) {
         result->failed = true;
         bson_set_error (
            &result->error,
            MONGOC_ERROR_COMMAND,
            MONGOC_ERROR_COMMAND_INVALID_ARG,
            "Cannot set bypassDocumentValidation for unacknowledged writes");
         EXIT;
      }
   }

   if (crud->client_session &&
       !mongoc_write_concern_is_acknowledged (crud->writeConcern)) {
      result->failed = true;
      bson_set_error (&result->error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "Cannot use client session with unacknowledged writes");
      EXIT;
   }

   if (command->payload.len == 0) {
      _empty_error (command, &result->error);
      EXIT;
   }

   if (server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG) {
      _mongoc_write_opmsg (command,
                           client,
                           server_stream,
                           database,
                           collection,
                           crud->writeConcern,
                           offset,
                           crud->client_session,
                           result,
                           &result->error);
   } else {
      if (mongoc_write_concern_is_acknowledged (crud->writeConcern)) {
         _mongoc_write_opquery (command,
                                client,
                                server_stream,
                                database,
                                collection,
                                crud->writeConcern,
                                offset,
                                result,
                                &result->error);
      } else {
         gLegacyWriteOps[command->type](command,
                                        client,
                                        server_stream,
                                        database,
                                        collection,
                                        offset,
                                        result,
                                        &result->error);
      }
   }

   EXIT;
}
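
The guards above turn unsupported combinations into client-side MONGOC_ERROR_COMMAND_INVALID_ARG errors before anything is sent. A hedged sketch of how the collation check surfaces through a public helper follows; it assumes the collection still carries the w=0 concern from the previous sketch, so the write should be rejected locally.

/* Fragment: collation combined with an unacknowledged write concern. */
bson_t *selector = BCON_NEW ("x", BCON_UTF8 ("a"));
bson_t *update = BCON_NEW ("$set", "{", "x", BCON_UTF8 ("b"), "}");
bson_t *opts = BCON_NEW ("collation", "{", "locale", BCON_UTF8 ("en_US"), "}");
bson_error_t error;

if (!mongoc_collection_update_one (collection, selector, update, opts, NULL, &error)) {
   /* expected: "Cannot set collation for unacknowledged writes" */
   fprintf (stderr, "update rejected: %s\n", error.message);
}

bson_destroy (opts);
bson_destroy (update);
bson_destroy (selector);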
Example #6
/* complete a write result, including only certain fields */
bool
_mongoc_write_result_complete (
   mongoc_write_result_t *result,             /* IN */
   int32_t error_api_version,                 /* IN */
   const mongoc_write_concern_t *wc,          /* IN */
   mongoc_error_domain_t err_domain_override, /* IN */
   bson_t *bson,                              /* OUT */
   bson_error_t *error,                       /* OUT */
   ...)
{
   mongoc_error_domain_t domain;
   va_list args;
   const char *field;
   int n_args;
   bson_iter_t iter;
   bson_iter_t child;

   ENTRY;

   BSON_ASSERT (result);

   if (error_api_version >= MONGOC_ERROR_API_VERSION_2) {
      domain = MONGOC_ERROR_SERVER;
   } else if (err_domain_override) {
      domain = err_domain_override;
   } else if (result->error.domain) {
      domain = (mongoc_error_domain_t) result->error.domain;
   } else {
      domain = MONGOC_ERROR_COLLECTION;
   }

   /* produce either old fields like nModified from the deprecated Bulk API Spec
    * or new fields like modifiedCount from the CRUD Spec, which we partly obey
    */
   if (bson && mongoc_write_concern_is_acknowledged (wc)) {
      n_args = 0;
      va_start (args, error);
      while ((field = va_arg (args, const char *))) {
         n_args++;

         if (!strcmp (field, "nInserted")) {
            BSON_APPEND_INT32 (bson, field, result->nInserted);
         } else if (!strcmp (field, "insertedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nInserted);
         } else if (!strcmp (field, "nMatched")) {
            BSON_APPEND_INT32 (bson, field, result->nMatched);
         } else if (!strcmp (field, "matchedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nMatched);
         } else if (!strcmp (field, "nModified")) {
            BSON_APPEND_INT32 (bson, field, result->nModified);
         } else if (!strcmp (field, "modifiedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nModified);
         } else if (!strcmp (field, "nRemoved")) {
            BSON_APPEND_INT32 (bson, field, result->nRemoved);
         } else if (!strcmp (field, "deletedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nRemoved);
         } else if (!strcmp (field, "nUpserted")) {
            BSON_APPEND_INT32 (bson, field, result->nUpserted);
         } else if (!strcmp (field, "upsertedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nUpserted);
         } else if (!strcmp (field, "upserted") &&
                    !bson_empty0 (&result->upserted)) {
            BSON_APPEND_ARRAY (bson, field, &result->upserted);
         } else if (!strcmp (field, "upsertedId") &&
                    !bson_empty0 (&result->upserted) &&
                    bson_iter_init_find (&iter, &result->upserted, "0") &&
                    bson_iter_recurse (&iter, &child) &&
                    bson_iter_find (&child, "_id")) {
            /* "upsertedId", singular, for update_one() */
            BSON_APPEND_VALUE (bson, "upsertedId", bson_iter_value (&child));
         }
      }

      va_end (args);

      /* default: a standard result includes all Bulk API fields */
      if (!n_args) {
         BSON_APPEND_INT32 (bson, "nInserted", result->nInserted);
         BSON_APPEND_INT32 (bson, "nMatched", result->nMatched);
         BSON_APPEND_INT32 (bson, "nModified", result->nModified);
         BSON_APPEND_INT32 (bson, "nRemoved", result->nRemoved);
         BSON_APPEND_INT32 (bson, "nUpserted", result->nUpserted);
         if (!bson_empty0 (&result->upserted)) {
            BSON_APPEND_ARRAY (bson, "upserted", &result->upserted);
         }
      }

      /* always append errors if there are any */
      if (!n_args || !bson_empty (&result->writeErrors)) {
         BSON_APPEND_ARRAY (bson, "writeErrors", &result->writeErrors);
      }

      if (result->n_writeConcernErrors) {
         BSON_APPEND_ARRAY (
            bson, "writeConcernErrors", &result->writeConcernErrors);
      }
   }
Example #7
bool HHVM_METHOD(MongoDBDriverWriteResult, isAcknowledged)
{
	MongoDBDriverWriteResultData* data = Native::data<MongoDBDriverWriteResultData>(this_);

	return mongoc_write_concern_is_acknowledged(data->m_write_concern);
}
Example #8
void
_mongoc_write_command_execute (
   mongoc_write_command_t *command,             /* IN */
   mongoc_client_t *client,                     /* IN */
   mongoc_server_stream_t *server_stream,       /* IN */
   const char *database,                        /* IN */
   const char *collection,                      /* IN */
   const mongoc_write_concern_t *write_concern, /* IN */
   uint32_t offset,                             /* IN */
   mongoc_client_session_t *cs,                 /* IN */
   mongoc_write_result_t *result)               /* OUT */
{
   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (database);
   BSON_ASSERT (collection);
   BSON_ASSERT (result);

   if (!write_concern) {
      write_concern = client->write_concern;
   }

   if (!mongoc_write_concern_is_valid (write_concern)) {
      bson_set_error (&result->error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "The write concern is invalid.");
      result->failed = true;
      EXIT;
   }

   if (command->flags.has_collation) {
      if (!mongoc_write_concern_is_acknowledged (write_concern)) {
         result->failed = true;
         bson_set_error (&result->error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_COMMAND_INVALID_ARG,
                         "Cannot set collation for unacknowledged writes");
         EXIT;
      }
      if (server_stream->sd->max_wire_version < WIRE_VERSION_COLLATION) {
         bson_set_error (&result->error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
                         "Collation is not supported by the selected server");
         result->failed = true;
         EXIT;
      }
   }
   if (command->flags.bypass_document_validation !=
       MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
      if (!mongoc_write_concern_is_acknowledged (write_concern)) {
         result->failed = true;
         bson_set_error (
            &result->error,
            MONGOC_ERROR_COMMAND,
            MONGOC_ERROR_COMMAND_INVALID_ARG,
            "Cannot set bypassDocumentValidation for unacknowledged writes");
         EXIT;
      }
   }
   if (command->payload.len == 0) {
      _empty_error (command, &result->error);
      EXIT;
   }

   if (server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG) {
      if (cs && !mongoc_write_concern_is_acknowledged (write_concern)) {
         bson_set_error (
            &result->error,
            MONGOC_ERROR_COMMAND,
            MONGOC_ERROR_COMMAND_INVALID_ARG,
            "Cannot use client session with unacknowledged writes");
         EXIT;
      }
      _mongoc_write_opmsg (command,
                           client,
                           server_stream,
                           database,
                           collection,
                           write_concern,
                           offset,
                           cs,
                           result,
                           &result->error);
   } else {
      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         _mongoc_write_opquery (command,
                                client,
                                server_stream,
                                database,
                                collection,
                                write_concern,
                                offset,
                                result,
                                &result->error);
      } else {
         gLegacyWriteOps[command->type](command,
                                        client,
                                        server_stream,
                                        database,
                                        collection,
                                        offset,
                                        result,
                                        &result->error);
      }
   }

   EXIT;
}
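
Before dispatching, this variant also rejects an invalid write concern outright. A short fragment illustrating the kind of concern that check refuses (journal requested together with w=0), consistent with the validity test earlier in this listing:

/* Fragment: a concern that requests journaling but also w=0 is the kind of
 * combination the validity check above rejects. */
mongoc_write_concern_t *bad_wc = mongoc_write_concern_new ();

mongoc_write_concern_set_w (bad_wc, MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED);
mongoc_write_concern_set_journal (bad_wc, true);

if (!mongoc_write_concern_is_valid (bad_wc)) {
   /* passing this concern to a write would fail with
    * "The write concern is invalid." before anything is sent */
   fprintf (stderr, "invalid write concern: journal requested with w=0\n");
}

mongoc_write_concern_destroy (bad_wc);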
Example #9
void
_mongoc_write_command_update_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   mongoc_rpc_t rpc;
   uint32_t request_id = 0;
   bson_iter_t subiter, subsubiter;
   bson_t doc;
   bool has_update, has_selector, is_upsert;
   bson_t update, selector;
   bson_t *gle = NULL;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   int32_t affected = 0;
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
                 BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         bson_init_static (&doc, data, len);

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter)[0] != '$') &&
             !bson_validate (
                &doc, (bson_validate_flags_t) vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            bson_reader_destroy (reader);
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         bson_reader_destroy (reader);
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   bson_reader_destroy (reader);
   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      has_update = false;
      has_selector = false;
      is_upsert = false;

      bson_iter_init (&subiter, bson);
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size, NULL);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.update = data;
            bson_init_static (&update, data, len);
            has_update = true;
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size, NULL);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.selector = data;
            bson_init_static (&selector, data, len);
            has_selector = true;
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
            is_upsert = true;
         }
      }

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         if (bson_iter_init_find (&subiter, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&subiter)) {
            affected = bson_iter_int32 (&subiter);
         }

         /*
          * CDRIVER-372:
          *
          * Versions of MongoDB before 2.6 don't return the _id for an
          * upsert if _id is not an ObjectId.
          */
         if (is_upsert && affected &&
             !bson_iter_init_find (&subiter, gle, "upserted") &&
             bson_iter_init_find (&subiter, gle, "updatedExisting") &&
             BSON_ITER_HOLDS_BOOL (&subiter) && !bson_iter_bool (&subiter)) {
            if (has_update && bson_iter_init_find (&subiter, &update, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            } else if (has_selector &&
                       bson_iter_init_find (&subiter, &selector, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            }
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_UPDATE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      started = bson_get_monotonic_time ();
   }
   bson_reader_destroy (reader);
}
Example #10
void
_mongoc_write_command_insert_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   uint32_t current_offset;
   mongoc_iovec_t *iov;
   mongoc_rpc_t rpc;
   bson_t *gle = NULL;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   uint32_t n_docs_in_batch;
   uint32_t request_id = 0;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;
   int data_offset = 0;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   started = bson_get_monotonic_time ();
   current_offset = offset;

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 +
                      strlen (collection) + 1);

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);
   while ((bson = bson_reader_read (reader, &eof))) {
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx <= command->n_documents);

      if (bson->len > max_bson_obj_size) {
         /* document is too large */
         bson_t write_err_doc = BSON_INITIALIZER;

         _mongoc_write_command_too_large_error (
            error, idx, bson->len, max_bson_obj_size, &write_err_doc);

         _mongoc_write_result_merge_legacy (
            result,
            command,
            &write_err_doc,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_INSERT_FAILED,
            offset + idx);

         bson_destroy (&write_err_doc);
         data_offset += bson->len;

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - bson->len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
         iov[n_docs_in_batch].iov_len = bson->len;
         size += bson->len;
         n_docs_in_batch++;
         data_offset += bson->len;
      }

      idx++;
   }
   bson_reader_destroy (reader);

   if (n_docs_in_batch) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags =
         ((command->flags.ordered) ? MONGOC_INSERT_NONE
                                   : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         bool err = false;
         bson_iter_t citer;

         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            GOTO (cleanup);
         }

         err = (bson_iter_init_find (&citer, gle, "err") &&
                bson_iter_as_bool (&citer));

         /*
          * Overwrite the "n" field since it will be zero. Otherwise, our
          * merge_legacy code will not know how many we tried in this batch.
          */
         if (!err && bson_iter_init_find (&citer, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&citer) && !bson_iter_int32 (&citer)) {
            bson_iter_overwrite_int32 (&citer, n_docs_in_batch);
         }
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }

cleanup:

   if (gle) {
      _mongoc_write_result_merge_legacy (result,
                                         command,
                                         gle,
                                         client->error_api_version,
                                         MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                                         current_offset);

      current_offset = offset + idx;
      bson_destroy (gle);
      gle = NULL;
   }

   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
Example #11
void
_mongoc_write_command_delete_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   const uint8_t *data;
   mongoc_rpc_t rpc;
   uint32_t request_id;
   bson_iter_t q_iter;
   uint32_t len;
   int64_t limit = 0;
   bson_t *gle = NULL;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   bool r;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_DELETE_FAILED,
                      "Cannot do an empty delete.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      /* the document is like { "q": { <selector> }, limit: <0 or 1> } */
      r = (bson_iter_init (&q_iter, bson) && bson_iter_find (&q_iter, "q") &&
           BSON_ITER_HOLDS_DOCUMENT (&q_iter));

      BSON_ASSERT (r);
      bson_iter_document (&q_iter, &len, &data);
      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);
      if (len > max_bson_obj_size) {
         _mongoc_write_command_too_large_error (
            error, 0, len, max_bson_obj_size, NULL);
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_DELETE;
      rpc.delete_.zero = 0;
      rpc.delete_.collection = ns;

      if (bson_iter_find (&q_iter, "limit") &&
          (BSON_ITER_HOLDS_INT (&q_iter))) {
         limit = bson_iter_as_int64 (&q_iter);
      }

      rpc.delete_.flags =
         limit ? MONGOC_DELETE_SINGLE_REMOVE : MONGOC_DELETE_NONE;
      rpc.delete_.selector = data;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_DELETE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      started = bson_get_monotonic_time ();
   }
   bson_reader_destroy (reader);

   EXIT;
}