/*---------------------------------------------------------------------------
 *
 * _change_stream_init --
 *
 *       Called after @stream has the collection name, database name, read
 *       preferences, and read concern set. Creates the change stream
 *       cursor.
 *
 *--------------------------------------------------------------------------
 */
void
_change_stream_init (mongoc_change_stream_t *stream,
                     const bson_t *pipeline,
                     const bson_t *opts)
{
   BSON_ASSERT (pipeline);

   stream->max_await_time_ms = -1;
   stream->batch_size = -1;
   bson_init (&stream->pipeline_to_append);
   bson_init (&stream->resume_token);
   bson_init (&stream->err_doc);

   if (!_mongoc_change_stream_opts_parse (
          stream->client, opts, &stream->opts, &stream->err)) {
      return;
   }

   stream->full_document = BCON_NEW ("fullDocument", stream->opts.fullDocument);

   if (!bson_empty (&(stream->opts.resumeAfter))) {
      bson_append_document (
         &stream->resume_token, "resumeAfter", 11, &(stream->opts.resumeAfter));
   }

   _mongoc_timestamp_set (&stream->operation_time,
                          &(stream->opts.startAtOperationTime));

   stream->batch_size = stream->opts.batchSize;
   stream->max_await_time_ms = stream->opts.maxAwaitTimeMS;

   /* Accept two forms of user pipeline:
    * 1. A document like: { "pipeline": [...] }
    * 2. An array-like document: { "0": {}, "1": {}, ... }
    * If the passed pipeline is invalid, we pass it along and let the server
    * error instead.
    */
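   /* For illustration only (hypothetical, not taken from this file), both of
    * these user pipelines are accepted and are equivalent:
    *   form 1: BCON_NEW ("pipeline", "[",
    *                        "{", "$match", "{", "operationType",
    *                             BCON_UTF8 ("insert"), "}", "}",
    *                     "]")
    *   form 2: BCON_NEW ("0", "{", "$match", "{", "operationType",
    *                          BCON_UTF8 ("insert"), "}", "}")
    */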
   if (!bson_empty (pipeline)) {
      bson_iter_t iter;
      if (bson_iter_init_find (&iter, pipeline, "pipeline") &&
          BSON_ITER_HOLDS_ARRAY (&iter)) {
         if (!BSON_APPEND_VALUE (&stream->pipeline_to_append,
                                 "pipeline",
                                 bson_iter_value (&iter))) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      } else {
         if (!BSON_APPEND_ARRAY (
                &stream->pipeline_to_append, "pipeline", pipeline)) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      }
   }

   if (stream->err.code == 0) {
      (void) _make_cursor (stream);
   }
}
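
/* Usage sketch (illustrative, not part of the driver source): the public
 * entry point that eventually reaches _change_stream_init is
 * mongoc_collection_watch. Assumes <mongoc/mongoc.h> is included and that
 * "coll" is a valid collection handle owned by the caller. */
static mongoc_change_stream_t *
example_watch_inserts (mongoc_collection_t *coll)
{
   /* array-like pipeline form, as described above */
   bson_t *pipeline = BCON_NEW (
      "0", "{", "$match", "{", "operationType", BCON_UTF8 ("insert"), "}", "}");
   bson_t *opts = BCON_NEW ("fullDocument",
                            BCON_UTF8 ("updateLookup"),
                            "batchSize",
                            BCON_INT32 (10),
                            "maxAwaitTimeMS",
                            BCON_INT64 (5000));
   mongoc_change_stream_t *stream =
      mongoc_collection_watch (coll, pipeline, opts);

   bson_destroy (pipeline);
   bson_destroy (opts);
   return stream;
}
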
bool
mongoc_change_stream_next (mongoc_change_stream_t *stream, const bson_t **bson)
{
   bson_iter_t iter;
   bool ret = false;

   BSON_ASSERT (stream);
   BSON_ASSERT (bson);

   if (stream->err.code != 0) {
      goto end;
   }

   BSON_ASSERT (stream->cursor);
   if (!mongoc_cursor_next (stream->cursor, bson)) {
      const bson_t *err_doc;
      bson_error_t err;
      bool resumable = false;

      if (!mongoc_cursor_error_document (stream->cursor, &err, &err_doc)) {
         /* no error occurred, just no documents left. */
         goto end;
      }

      resumable = _is_resumable_error (err_doc);
      if (resumable) {
         /* recreate the cursor. */
         mongoc_cursor_destroy (stream->cursor);
         stream->cursor = NULL;
         if (!_make_cursor (stream)) {
            goto end;
         }
         if (!mongoc_cursor_next (stream->cursor, bson)) {
            resumable =
               !mongoc_cursor_error_document (stream->cursor, &err, &err_doc);
            if (resumable) {
               /* empty batch. */
               goto end;
            }
         }
      }

      if (!resumable) {
         stream->err = err;
         bson_destroy (&stream->err_doc);
         bson_copy_to (err_doc, &stream->err_doc);
         goto end;
      }
   }

   /* we have received documents, either from the first call to next or after a
    * resume. */
   if (!bson_iter_init_find (&iter, *bson, "_id")) {
      bson_set_error (&stream->err,
                      MONGOC_ERROR_CURSOR,
                      MONGOC_ERROR_CHANGE_STREAM_NO_RESUME_TOKEN,
                      "Cannot provide resume functionality when the resume "
                      "token is missing");
      goto end;
   }

   /* copy the resume token. */
   bson_reinit (&stream->resume_token);
   BSON_APPEND_VALUE (
      &stream->resume_token, "resumeAfter", bson_iter_value (&iter));
   /* clear out the operation time, since we no longer need it to resume. */
   _mongoc_timestamp_clear (&stream->operation_time);
   ret = true;

end:
   /* Driver Sessions Spec: "When an implicit session is associated with a
    * cursor for use with getMore operations, the session MUST be returned to
    * the pool immediately following a getMore operation that indicates that the
    * cursor has been exhausted." */
   if (stream->implicit_session) {
      /* if creating the change stream cursor errored, it may be null. */
      if (!stream->cursor || stream->cursor->cursor_id == 0) {
         mongoc_client_session_destroy (stream->implicit_session);
         stream->implicit_session = NULL;
      }
   }
   return ret;
}
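
/* Consumption sketch (illustrative, not part of the driver source): the
 * typical loop around mongoc_change_stream_next. When iteration stops,
 * mongoc_change_stream_error_document distinguishes "no more events" from a
 * non-resumable error. Assumes <stdio.h> and <mongoc/mongoc.h> are included. */
static void
example_consume (mongoc_change_stream_t *stream)
{
   const bson_t *doc;
   const bson_t *err_doc;
   bson_error_t error;

   while (mongoc_change_stream_next (stream, &doc)) {
      char *json = bson_as_canonical_extended_json (doc, NULL);
      printf ("change: %s\n", json);
      bson_free (json);
   }

   if (mongoc_change_stream_error_document (stream, &error, &err_doc)) {
      fprintf (stderr, "change stream error: %s\n", error.message);
   }

   mongoc_change_stream_destroy (stream);
}
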
Example 3
mongoc_change_stream_t *
_mongoc_change_stream_new (const mongoc_collection_t *coll,
                           const bson_t *pipeline,
                           const bson_t *opts)
{
   bool full_doc_set = false;
   mongoc_change_stream_t *stream =
      (mongoc_change_stream_t *) bson_malloc0 (sizeof (mongoc_change_stream_t));

   BSON_ASSERT (coll);
   BSON_ASSERT (pipeline);

   stream->max_await_time_ms = -1;
   stream->batch_size = -1;
   stream->coll = mongoc_collection_copy ((mongoc_collection_t *) coll);
   bson_init (&stream->pipeline_to_append);
   bson_init (&stream->full_document);
   bson_init (&stream->opts);
   bson_init (&stream->resume_token);
   bson_init (&stream->err_doc);

   /*
    * The passed options may consist of:
    * fullDocument: 'default'|'updateLookup', passed to $changeStream stage
    * resumeAfter: optional<Doc>, passed to $changeStream stage
    * maxAwaitTimeMS: Optional<Int64>, set on the cursor
    * batchSize: Optional<Int32>, passed as agg option, {cursor: { batchSize: }}
    * standard command options like "sessionId", "maxTimeMS", or "collation"
    */
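   /* For illustration (hypothetical, not from this file), an opts document
    * exercising the options above could be built with:
    *   BCON_NEW ("fullDocument", BCON_UTF8 ("updateLookup"),
    *             "resumeAfter", BCON_DOCUMENT (saved_token),
    *             "batchSize", BCON_INT32 (10),
    *             "maxAwaitTimeMS", BCON_INT64 (5000),
    *             "collation", "{", "locale", BCON_UTF8 ("en_US"), "}")
    * where "saved_token" is a hypothetical bson_t * holding a previously
    * saved resume token. */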

   if (opts) {
      bson_iter_t iter;

      if (bson_iter_init_find (&iter, opts, "fullDocument")) {
         SET_BSON_OR_ERR (&stream->full_document, "fullDocument");
         full_doc_set = true;
      }

      if (bson_iter_init_find (&iter, opts, "resumeAfter")) {
         SET_BSON_OR_ERR (&stream->resume_token, "resumeAfter");
      }

      if (bson_iter_init_find (&iter, opts, "batchSize")) {
         if (BSON_ITER_HOLDS_INT32 (&iter)) {
            stream->batch_size = bson_iter_int32 (&iter);
         }
      }

      if (bson_iter_init_find (&iter, opts, "maxAwaitTimeMS") &&
          BSON_ITER_HOLDS_INT (&iter)) {
         stream->max_await_time_ms = bson_iter_as_int64 (&iter);
      }

      /* save the remaining opts for mongoc_collection_read_command_with_opts */
      bson_copy_to_excluding_noinit (opts,
                                     &stream->opts,
                                     "fullDocument",
                                     "resumeAfter",
                                     "batchSize",
                                     "maxAwaitTimeMS",
                                     NULL);
   }

   if (!full_doc_set) {
      if (!BSON_APPEND_UTF8 (
             &stream->full_document, "fullDocument", "default")) {
         CHANGE_STREAM_ERR ("fullDocument");
      }
   }

   /* Accept two forms of user pipeline:
    * 1. A document like: { "pipeline": [...] }
    * 2. An array-like document: { "0": {}, "1": {}, ... }
    * If the passed pipeline is invalid, we pass it along and let the server
    * error instead.
    */
   if (!bson_empty (pipeline)) {
      bson_iter_t iter;
      if (bson_iter_init_find (&iter, pipeline, "pipeline") &&
          BSON_ITER_HOLDS_ARRAY (&iter)) {
         SET_BSON_OR_ERR (&stream->pipeline_to_append, "pipeline");
      } else {
         if (!BSON_APPEND_ARRAY (
                &stream->pipeline_to_append, "pipeline", pipeline)) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      }
   }

   if (stream->err.code == 0) {
      (void) _make_cursor (stream);
   }

   return stream;
}
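
/* Resume sketch (illustrative, not part of the driver source): the "_id" of
 * each change document is the resume token that the code above copies into
 * stream->resume_token. A caller that persisted such a token can pass it back
 * as the "resumeAfter" option to continue where it left off. "saved_token" is
 * a hypothetical, caller-supplied document. */
static mongoc_change_stream_t *
example_resume (mongoc_collection_t *coll, bson_t *saved_token)
{
   bson_t pipeline = BSON_INITIALIZER; /* empty pipeline */
   bson_t *opts = BCON_NEW ("resumeAfter", BCON_DOCUMENT (saved_token));
   mongoc_change_stream_t *stream =
      mongoc_collection_watch (coll, &pipeline, opts);

   bson_destroy (&pipeline);
   bson_destroy (opts);
   return stream;
}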