void test_suite (mongoc_database_t   *db,
                 mongoc_collection_t *collection)
{
   bson_error_t error;
   bson_t query = BSON_INITIALIZER;
   int64_t count;
   bson_t *options, *pipeline;
   double start_time, end_time, delta_time;
   mongoc_cursor_t *cursor;

   count = mongoc_collection_count (collection, MONGOC_QUERY_NONE, &query, 0, 0, NULL, &error);
   printf ("mongoc_collection_count count: %"PRId64"\n", count);
   options = BCON_NEW ("cursor", "{", "}", "allowDiskUse", BCON_BOOL (1));
   pipeline = BCON_NEW (
      "pipeline", "[",
          "{",
             "$match", "{",
             "}",
          "}",
          "{",
             "$project", "{",
                "text", BCON_INT32 (1),
             "}",
          "}",
       "]"
   );
   start_time = dtimeofday ();
   cursor = mongoc_collection_aggregate (collection, MONGOC_QUERY_NONE, pipeline, options, NULL);
   count = mongoc_cursor_dump (cursor);
   end_time = dtimeofday ();
   delta_time = end_time - start_time + 0.0000001;
   printf ("mongoc_cursor_dump: secs: %.2f, count: %"PRId64", %.2f docs/sec\n", delta_time, count, count/delta_time);
   mongoc_cursor_destroy (cursor);
   bson_destroy (&query);
   bson_destroy (options);
   bson_destroy (pipeline);
}
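The test_suite() example above relies on a dtimeofday() helper that is not part of libmongoc; a minimal sketch, assuming POSIX gettimeofday(), might look like this:

#include <sys/time.h>

/* Assumed helper (not part of libmongoc): wall-clock time in fractional seconds. */
static double
dtimeofday (void)
{
   struct timeval tv;

   gettimeofday (&tv, NULL);
   return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0;
}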
static void
print_pipeline (mongoc_collection_t *collection)
{
   mongoc_cursor_t *cursor;
   bson_error_t error;
   const bson_t *doc;
   bson_t *pipeline;
   char *str;

   pipeline = BCON_NEW ("pipeline", "[",
      "{", "$group", "{", "_id", "$state", "total_pop", "{", "$sum", "$pop", "}", "}", "}",
      "{", "$match", "{", "total_pop", "{", "$gte", BCON_INT32 (10000000), "}", "}", "}",
   "]");

   cursor = mongoc_collection_aggregate (collection, MONGOC_QUERY_NONE, pipeline, NULL, NULL);

   while (mongoc_cursor_next (cursor, &doc)) {
      str = bson_as_json (doc, NULL);
      printf ("%s\n", str);
      bson_free (str);
   }

   if (mongoc_cursor_error (cursor, &error)) {
      fprintf (stderr, "Cursor Failure: %s\n", error.message);
   }

   mongoc_cursor_destroy (cursor);
   bson_destroy (pipeline);
}
SEXP R_mongo_collection_aggregate(SEXP ptr_col, SEXP ptr_pipeline, SEXP no_timeout) {
  mongoc_collection_t *col = r2col(ptr_col);
  bson_t *pipeline = r2bson(ptr_pipeline);

  mongoc_query_flags_t flags = MONGOC_QUERY_NONE;
  if(asLogical(no_timeout))
    flags |= MONGOC_QUERY_NO_CURSOR_TIMEOUT;

  mongoc_cursor_t *c = mongoc_collection_aggregate (col, flags, pipeline, NULL, NULL);
  if(!c)
    stop("Error executing pipeline.");
  return cursor2r(c);
}
static future_t *
aggregate_raw_pipeline (func_ctx_t *ctx, bson_t *cmd)
{
   BSON_APPEND_UTF8 (cmd, "aggregate", "collection");
   ctx->cursor = mongoc_collection_aggregate (ctx->collection,
                                              MONGOC_QUERY_NONE,
                                              tmp_bson ("[{'$out': 'foo'}]"),
                                              ctx->opts,
                                              ctx->prefs);

   /* use ctx->data as the bson_t** out-param to mongoc_cursor_next () */
   return future_cursor_next (ctx->cursor, (const bson_t **) &ctx->data);
}
/**
 * Performs an aggregation operation on a MongoDB collection.
 *
 * @param collection The MongoDB collection to query against.
 * @param pipeline A pointer to a BSON buffer representing the pipeline.
 * @param coldata The column data to store the results in.
 *
 * @return If successful, a Monary cursor that should be freed with
 * monary_close_query() when no longer in use. If unsuccessful, or if an invalid
 * pipeline was passed in, NULL is returned.
 */
monary_cursor* monary_init_aggregate(mongoc_collection_t* collection,
                                     const uint8_t* pipeline,
                                     monary_column_data* coldata)
{
    bson_t pl_bson;
    int32_t pl_size;
    mongoc_cursor_t* mcursor;
    monary_cursor* cursor;

    // Sanity checks
    if (!collection) {
        DEBUG("%s", "Invalid collection");
        return NULL;
    }
    else if (!pipeline) {
        DEBUG("%s", "Invalid pipeline");
        return NULL;
    }

    // Build BSON pipeline
    memcpy(&pl_size, pipeline, sizeof(int32_t));
    pl_size = (int32_t) BSON_UINT32_FROM_LE(pl_size);
    if (!bson_init_static(&pl_bson,
                          pipeline,
                          pl_size)) {
        DEBUG("%s", "Failed to initialize raw BSON pipeline");
        return NULL;
    }

    // Get an aggregation cursor
    mcursor = mongoc_collection_aggregate(collection,
                                          MONGOC_QUERY_NONE,
                                          &pl_bson,
                                          NULL,
                                          NULL);

    // Clean up
    bson_destroy(&pl_bson);

    if (!mcursor) {
        DEBUG("%s", "An error occurred with the aggregation");
        return NULL;
    }

    cursor = (monary_cursor*) malloc(sizeof(monary_cursor));
    cursor->mcursor = mcursor;
    cursor->coldata = coldata;
    return cursor;
}
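A hedged usage sketch for monary_init_aggregate() above: the raw pipeline bytes come from a bson_t serialized with libbson (bson_get_data()), and the monary_column_data allocation, which Monary handles elsewhere, is left as a placeholder. The cleanup call monary_close_query() is the one named in the doc comment.

/* Usage sketch; `collection` and the coldata allocation are assumed to exist elsewhere. */
bson_t *pl = BCON_NEW ("pipeline", "[", "{", "$match", "{", "}", "}", "]");
monary_column_data *coldata = NULL;   /* placeholder: obtain from Monary's column allocator */
monary_cursor *cursor;

cursor = monary_init_aggregate (collection, bson_get_data (pl), coldata);
if (cursor) {
    /* ... consume the results, then free with monary_close_query (cursor) ... */
}
bson_destroy (pl);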
int lua_mongo_collection_aggregate (lua_State *L)
{
    collection_t *collection;
    bson_t aggregation_pipeline = BSON_INITIALIZER;
    bson_t inner_aggregation_pipeline = BSON_INITIALIZER;
    mongoc_cursor_t *cursor = NULL;
    bson_error_t error;
    bool throw_error = false;

    collection = (collection_t *)luaL_checkudata(L, 1, "lua_mongoc_collection");

    if (!(lua_istable(L, 2)) || !(lua_table_is_array(L, 2))) {
        luaL_error(L, "aggregation pipeline must be an array");
    } else {
        bson_append_array_begin(&aggregation_pipeline, "pipeline", -1,
                                &inner_aggregation_pipeline);

        if (!(lua_table_to_bson(L, &inner_aggregation_pipeline, 2, false, &error))) {
            throw_error = true;
            goto DONE;
        }

        bson_append_array_end(&aggregation_pipeline, &inner_aggregation_pipeline);
    }

    cursor = mongoc_collection_aggregate (collection->c_collection, MONGOC_QUERY_NONE,
                                          &aggregation_pipeline,
                                          NULL, NULL);

DONE:
    bson_destroy(&aggregation_pipeline);
    bson_destroy(&inner_aggregation_pipeline);

    if (throw_error) {
        if (cursor) {
            mongoc_cursor_destroy (cursor);
        }
        luaL_error(L, "%s", error.message);
    }

    lua_mongo_cursor_new(L, cursor);

    return 1;
}
static void *
background_mongoc_collection_aggregate (void *data)
{
   future_t *future = (future_t *) data;
   future_value_t return_value;

   return_value.type = future_value_mongoc_cursor_ptr_type;

   future_value_set_mongoc_cursor_ptr (
      &return_value,
      mongoc_collection_aggregate (
         future_value_get_mongoc_collection_ptr (future_get_param (future, 0)),
         future_value_get_mongoc_query_flags_t (future_get_param (future, 1)),
         future_value_get_const_bson_ptr (future_get_param (future, 2)),
         future_value_get_const_bson_ptr (future_get_param (future, 3)),
         future_value_get_const_mongoc_read_prefs_ptr (future_get_param (future, 4))
      ));

   future_resolve (future, return_value);

   return NULL;
}
static mongoc_cursor_t *
aggregate (mongoc_collection_t *collection, mongoc_read_prefs_t *prefs)
{
   return mongoc_collection_aggregate (
      collection, MONGOC_QUERY_NONE, tmp_bson ("{}"), NULL /* opts */, prefs);
}
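The aggregate() helper above threads a read preference through to the server; a minimal, hedged usage sketch using libmongoc's read-preference API might look like this:

/* Sketch: run the helper with a secondary-preferred read preference. */
mongoc_read_prefs_t *prefs;
mongoc_cursor_t *cursor;

prefs = mongoc_read_prefs_new (MONGOC_READ_SECONDARY_PREFERRED);
cursor = aggregate (collection, prefs);
/* ... iterate with mongoc_cursor_next () ... */
mongoc_read_prefs_destroy (prefs);
mongoc_cursor_destroy (cursor);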
static void
test_aggregate (void)
{
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bson_error_t error;
   bool r;
   bson_t b;
   bson_t match;
   bson_t pipeline;
   bson_iter_t iter;

   bson_init(&b);
   bson_append_utf8(&b, "hello", -1, "world", -1);

   bson_init(&match);
   bson_append_document(&match, "$match", -1, &b);

   bson_init(&pipeline);
   bson_append_document(&pipeline, "0", -1, &match);

   client = mongoc_client_new(gTestUri);
   ASSERT (client);

   collection = mongoc_client_get_collection(client, "test", "test");
   ASSERT (collection);

   mongoc_collection_drop(collection, &error);

   r = mongoc_collection_insert(collection, MONGOC_INSERT_NONE, &b, NULL, &error);
   ASSERT (r);

   cursor = mongoc_collection_aggregate(collection, MONGOC_QUERY_NONE, &pipeline, NULL, NULL);
   ASSERT (cursor);

   /*
    * This can fail if we are connecting to a pre-2.5.x MongoDB instance.
    */
   r = mongoc_cursor_next(cursor, &doc);
   if (mongoc_cursor_error(cursor, &error)) {
      MONGOC_WARNING("%s", error.message);
   }

   ASSERT (r);
   ASSERT (doc);

   ASSERT (bson_iter_init_find (&iter, doc, "hello") &&
           BSON_ITER_HOLDS_UTF8 (&iter));

   r = mongoc_cursor_next(cursor, &doc);
   if (mongoc_cursor_error(cursor, &error)) {
      MONGOC_WARNING("%s", error.message);
   }
   ASSERT (!r);
   ASSERT (!doc);

   mongoc_cursor_destroy(cursor);
   mongoc_collection_destroy(collection);
   mongoc_client_destroy(client);
   bson_destroy(&b);
   bson_destroy(&pipeline);
   bson_destroy(&match);
}
static void
test_aggregate (void)
{
   mongoc_collection_t *collection;
   mongoc_database_t *database;
   mongoc_client_t *client;
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bson_error_t error;
   bool r;
   bson_t b;
   bson_t opts;
   bson_t match;
   bson_t pipeline;
   bson_iter_t iter;
   int i;

   bson_init(&b);
   bson_append_utf8(&b, "hello", -1, "world", -1);

   bson_init(&match);
   bson_append_document(&match, "$match", -1, &b);

   bson_init(&pipeline);
   bson_append_document(&pipeline, "0", -1, &match);

   client = mongoc_client_new(gTestUri);
   ASSERT (client);

   database = get_test_database (client);
   ASSERT (database);

   collection = get_test_collection (client, "test_aggregate");
   ASSERT (collection);

   mongoc_collection_drop(collection, &error);

   r = mongoc_collection_insert(collection, MONGOC_INSERT_NONE, &b, NULL, &error);
   ASSERT (r);

   for (i = 0; i < 2; i++) {
      if (i % 2 == 0) {
         cursor = mongoc_collection_aggregate(collection, MONGOC_QUERY_NONE, &pipeline, NULL, NULL);
         ASSERT (cursor);
      } else {
         bson_init (&opts);
         BSON_APPEND_INT32 (&opts, "batchSize", 10);
         BSON_APPEND_BOOL (&opts, "allowDiskUse", true);

         cursor = mongoc_collection_aggregate(collection, MONGOC_QUERY_NONE, &pipeline, &opts, NULL);
         ASSERT (cursor);

         bson_destroy (&opts);
      }

      /*
       * This can fail if we are connecting to a 2.0 MongoDB instance.
       */
      r = mongoc_cursor_next(cursor, &doc);
      if (mongoc_cursor_error(cursor, &error)) {
         if ((error.domain == MONGOC_ERROR_QUERY) &&
             (error.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND)) {
            mongoc_cursor_destroy (cursor);
            break;
         }
         MONGOC_WARNING("[%d.%d] %s", error.domain, error.code, error.message);
      }

      ASSERT (r);
      ASSERT (doc);

      ASSERT (bson_iter_init_find (&iter, doc, "hello") &&
              BSON_ITER_HOLDS_UTF8 (&iter));

      r = mongoc_cursor_next(cursor, &doc);
      if (mongoc_cursor_error(cursor, &error)) {
         MONGOC_WARNING("%s", error.message);
      }
      ASSERT (!r);
      ASSERT (!doc);

      mongoc_cursor_destroy(cursor);
   }

   r = mongoc_collection_drop(collection, &error);
   ASSERT (r);

   mongoc_collection_destroy(collection);
   mongoc_database_destroy(database);
   mongoc_client_destroy(client);
   bson_destroy(&b);
   bson_destroy(&pipeline);
   bson_destroy(&match);
}