static void
multi_upload_before (perf_test_t *test)
{
   multi_upload_test_t *upload_test;
   mongoc_client_t *client;
   mongoc_gridfs_t *gridfs;
   bson_error_t error;
   mongoc_database_t *db;
   multi_upload_thread_context_t *ctx;
   int i;

   perf_test_before (test);

   upload_test = (multi_upload_test_t *) test;
   client = mongoc_client_pool_pop (upload_test->pool);
   db = mongoc_client_get_database (client, "perftest");
   if (!mongoc_database_drop (db, &error)) {
      MONGOC_ERROR ("database_drop: %s\n", error.message);
      abort ();
   }

   gridfs = mongoc_client_get_gridfs (client, "perftest", NULL, &error);
   if (!gridfs) {
      MONGOC_ERROR ("get_gridfs: %s\n", error.message);
      abort ();
   }

   write_one_byte_file (gridfs);
   mongoc_gridfs_destroy (gridfs);
   mongoc_client_pool_push (upload_test->pool, client);

   for (i = 0; i < upload_test->cnt; i++) {
      ctx = &upload_test->contexts[i];
      ctx->client = mongoc_client_pool_pop (upload_test->pool);
      ctx->gridfs = mongoc_client_get_gridfs (ctx->client, "perftest",
                                              NULL, &error);
      if (!ctx->gridfs) {
         MONGOC_ERROR ("get_gridfs: %s\n", error.message);
         abort ();
      }

      ctx->stream = mongoc_stream_file_new_for_path (ctx->path, O_RDONLY, 0);
      if (!ctx->stream) {
         perror ("stream_new_for_path");
         abort ();
      }
   }

   mongoc_database_destroy (db);
}
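Each context in the loop above keeps the client it popped, so the test needs a matching teardown that returns them to the pool. A minimal sketch of that counterpart, assuming the same context fields and a perf_test_after() hook (neither the teardown nor the hook is shown in the source):
static void
multi_upload_after (perf_test_t *test)
{
   /* hypothetical teardown mirroring multi_upload_before above */
   multi_upload_test_t *upload_test = (multi_upload_test_t *) test;
   multi_upload_thread_context_t *ctx;
   int i;

   for (i = 0; i < upload_test->cnt; i++) {
      ctx = &upload_test->contexts[i];
      mongoc_stream_destroy (ctx->stream);   /* close the source file stream */
      mongoc_gridfs_destroy (ctx->gridfs);   /* per-thread gridfs handle */
      mongoc_client_pool_push (upload_test->pool, ctx->client);
   }

   perf_test_after (test);   /* assumed counterpart of perf_test_before */
}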
Example #2
int
main (int   argc,
      char *argv[])
{
   mongoc_client_pool_t *pool;
   mongoc_client_t *client;
   mongoc_uri_t *uri;
   unsigned count = 10000;

   if (argc > 1) {
      if (!(uri = mongoc_uri_new(argv[1]))) {
         fprintf(stderr, "Failed to parse uri: %s\n", argv[1]);
         return 1;
      }
   } else {
      uri = mongoc_uri_new("mongodb://127.0.0.1:27017/?sockettimeoutms=500");
   }

   if (argc > 2) {
      count = MAX(atoi(argv[2]), 1);
   }

   pool = mongoc_client_pool_new(uri);
   client = mongoc_client_pool_pop(pool);
   test_load(client, count);
   mongoc_client_pool_push(pool, client);
   mongoc_uri_destroy(uri);
   mongoc_client_pool_destroy(pool);

   return 0;
}
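test_load() above is defined elsewhere in this example; a minimal stand-in, assuming it simply drives the popped client with repeated "ping" commands, could look like this:
static void
test_load (mongoc_client_t *client, unsigned count)
{
   /* hypothetical helper: issue "ping" count times on the popped client */
   bson_t ping = BSON_INITIALIZER;
   bson_error_t error;
   unsigned i;

   BSON_APPEND_INT32 (&ping, "ping", 1);

   for (i = 0; i < count; i++) {
      if (!mongoc_client_command_simple (client, "admin", &ping,
                                         NULL, NULL, &error)) {
         fprintf (stderr, "ping failed: %s\n", error.message);
         break;
      }
   }

   bson_destroy (&ping);
}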
static void
multi_download_before (perf_test_t *test)
{
   multi_download_test_t *download_test;
   bson_error_t error;
   multi_download_thread_context_t *ctx;
   int i;

   perf_test_before (test);
   prep_tmp_dir (test->data_path);

   download_test = (multi_download_test_t *) test;

   for (i = 0; i < download_test->cnt; i++) {
      ctx = &download_test->contexts[i];
      ctx->client = mongoc_client_pool_pop (download_test->pool);
      ctx->gridfs = mongoc_client_get_gridfs (ctx->client, "perftest",
                                              NULL, &error);

      if (!ctx->gridfs) {
         MONGOC_ERROR ("get_gridfs: %s\n", error.message);
         abort ();
      }
   }
}
static void
test_mongoc_metadata_append_after_cmd (void)
{
   mongoc_client_pool_t *pool;
   mongoc_client_t *client;
   mongoc_uri_t *uri;

   _reset_metadata ();

   uri = mongoc_uri_new ("mongodb://127.0.0.1?maxpoolsize=1&minpoolsize=1");
   pool = mongoc_client_pool_new (uri);

   /* Make sure that after we pop a client we can't set global metadata */
   client = mongoc_client_pool_pop (pool);

   ASSERT (!mongoc_metadata_append ("a", "a", "a"));

   mongoc_client_pool_push (pool, client);

   mongoc_uri_destroy (uri);
   mongoc_client_pool_destroy (pool);

   _reset_metadata ();
}
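For comparison, appending metadata is expected to succeed when it happens before the pool (and therefore any client) is created; a sketch of that ordering, with placeholder driver strings:
static void
example_metadata_before_pool (void)
{
   mongoc_client_pool_t *pool;
   mongoc_client_t *client;
   mongoc_uri_t *uri;

   _reset_metadata ();

   /* must happen before any pool or client exists */
   ASSERT (mongoc_metadata_append ("my driver", "0.0.1", "example platform"));

   uri = mongoc_uri_new ("mongodb://127.0.0.1?maxpoolsize=1&minpoolsize=1");
   pool = mongoc_client_pool_new (uri);
   client = mongoc_client_pool_pop (pool);

   mongoc_client_pool_push (pool, client);
   mongoc_uri_destroy (uri);
   mongoc_client_pool_destroy (pool);

   _reset_metadata ();
}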
Example #5
static void
_test_topology_events (bool pooled)
{
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   context_t context;
   bool r;
   bson_error_t error;
   bson_iter_t events_iter;
   bson_iter_t event_iter;
   uint32_t i;

   context_init (&context);

   if (pooled) {
      pool = test_framework_client_pool_new ();
      pool_set_topology_event_callbacks (pool, &context);
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_client_new ();
      client_set_topology_event_callbacks (client, &context);
   }

   r = mongoc_client_command_simple (client, "admin", tmp_bson ("{'ping': 1}"),
                                     NULL, NULL, &error);
   ASSERT_OR_PRINT (r, error);

   if (pooled) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }

   /* first event is topology opening */
   bson_iter_init (&events_iter, &context.events);
   bson_iter_next (&events_iter);
   ASSERT (bson_iter_recurse (&events_iter, &event_iter));
   ASSERT (bson_iter_find (&event_iter, "topology_opening_event"));

   /* last event is topology closed */
   for (i = 1; i < context.n_events; i++) {
      ASSERT (bson_iter_next (&events_iter));
   }

   ASSERT (bson_iter_recurse (&events_iter, &event_iter));
   ASSERT (bson_iter_find (&event_iter, "topology_closed_event"));

   /* no more events */
   ASSERT (!bson_iter_next (&events_iter));

   context_destroy (&context);
}
static void
test_mongoc_client_pool_basic (void)
{
   mongoc_client_pool_t *pool;
   mongoc_client_t *client;
   mongoc_uri_t *uri;

   uri = mongoc_uri_new("mongodb://127.0.0.1?maxpoolsize=1&minpoolsize=1");
   pool = mongoc_client_pool_new(uri);
   client = mongoc_client_pool_pop(pool);
   assert(client);
   mongoc_client_pool_push(pool, client);
   mongoc_uri_destroy(uri);
   mongoc_client_pool_destroy(pool);
}
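With maxpoolsize=1, a second mongoc_client_pool_pop() would block until the first client is pushed back; mongoc_client_pool_try_pop() is the non-blocking variant. A small sketch of handling an exhausted pool:
static void
example_pool_try_pop (mongoc_client_pool_t *pool)
{
   mongoc_client_t *first;
   mongoc_client_t *second;

   first = mongoc_client_pool_pop (pool);

   /* returns NULL right away instead of waiting for a free client */
   second = mongoc_client_pool_try_pop (pool);
   if (second) {
      mongoc_client_pool_push (pool, second);
   } else {
      fprintf (stderr, "pool exhausted, no client available\n");
   }

   mongoc_client_pool_push (pool, first);
}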
static PyObject *
pymongoc_client_pool_pop (PyObject *self,
                          PyObject *args)
{
   pymongoc_client_pool_t *client_pool = (pymongoc_client_pool_t *)self;
   mongoc_client_t *client;
   PyObject *pyclient;

   if (!pymongoc_client_pool_check (self)) {
      PyErr_SetString (PyExc_TypeError,
                       "self must be a pymongoc.ClientPool");
      return NULL;
   }

   BSON_ASSERT (client_pool->client_pool);

   client = mongoc_client_pool_pop (client_pool->client_pool);

   return pymongoc_client_new (client, false);
}
Example #8
static void
_test_heartbeat_events (bool pooled,
                        bool succeeded)
{
   context_t context;
   mock_server_t *server;
   mongoc_uri_t *uri;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   future_t *future;
   request_t *request;
   char *expected_json;
   bson_error_t error;

   context_init (&context);

   /* auto-respond to "foo" command */
   server = mock_server_new ();
   mock_server_run (server);
   mock_server_autoresponds (server, responder, NULL, NULL);
   uri = mongoc_uri_copy (mock_server_get_uri (server));
   mongoc_uri_set_option_as_int32 (uri, "serverSelectionTimeoutMS", 400);

   if (pooled) {
      pool = mongoc_client_pool_new (uri);
      pool_set_heartbeat_event_callbacks (pool, &context);
      client = mongoc_client_pool_pop (pool);
   } else {
      client = mongoc_client_new_from_uri (uri);
      client_set_heartbeat_event_callbacks (client, &context);
   }

   /* trigger "ismaster" handshake */
   future = future_client_command_simple (client, "admin",
                                          tmp_bson ("{'foo': 1}"),
                                          NULL, NULL, &error);

   /* topology scanner calls ismaster once */
   request = mock_server_receives_ismaster (server);

   if (succeeded) {
      mock_server_replies_ok_and_destroys (request);
   } else {
      mock_server_hangs_up (request);
      request_destroy (request);
   }

   /* pooled client opens new socket, handshakes it by calling ismaster again */
   if (pooled && succeeded) {
      request = mock_server_receives_ismaster (server);
      mock_server_replies_ok_and_destroys (request);
   }

   if (succeeded) {
      /* "foo" command succeeds */
      ASSERT_OR_PRINT (future_get_bool (future), error);
   } else {
      ASSERT (!future_get_bool (future));
   }

   if (pooled) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }

   /* even if pooled, only topology scanner sends events, so we get one pair */
   if (succeeded) {
      expected_json = bson_strdup_printf (
         "{'0': {'heartbeat_started_event': {'host': '%s'}},"
         " '1': {'heartbeat_succeeded_event': {'host': '%s'}}}",
         mock_server_get_host_and_port (server),
         mock_server_get_host_and_port (server));
   } else {
      expected_json = bson_strdup_printf (
         "{'0': {'heartbeat_started_event': {'host': '%s'}},"
         " '1': {'heartbeat_failed_event': {'host': '%s'}}}",
         mock_server_get_host_and_port (server),
         mock_server_get_host_and_port (server));
   }

   check_json_apm_events (&context.events, tmp_bson (expected_json));

   future_destroy (future);
   bson_free (expected_json);
   mongoc_uri_destroy (uri);
   mock_server_destroy (server);
   context_destroy (&context);
}
void* MongoDBConnectionPool::newConnection(const bool& isWrite, const ConnectionNode& node) {
	mongoc_client_pool_t *pool = (mongoc_client_pool_t*)getEnv();
	mongoc_client_t *client = mongoc_client_pool_pop(pool);
	std::cout << client << std::endl;
	return client;
}
Example #10
Client *ClientPool::pop ()
{
   mongoc_client_t * c = mongoc_client_pool_pop (clientPool.get());
   auto client = new Client(c, this);
   return client;
}
Example #11
static void
test2 (void)
{
   mongoc_read_prefs_t *read_prefs;
   mongoc_collection_t *collection;
   mongoc_cursor_t *cursor;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   const bson_t *doc;
   bson_error_t error;
   bool r;
   bson_t q;

   bson_init(&q);

   /*
    * Start by killing 2 of the replica set nodes.
    */
   ha_node_kill(r1);
   ha_node_kill(r2);

   if (use_pool) {
      pool = ha_replica_set_create_client_pool(replica_set);
      client = mongoc_client_pool_pop (pool);
   } else {
      client = ha_replica_set_create_client(replica_set);
   }

   collection = mongoc_client_get_collection(client, "test2", "test2");

   /*
    * Perform a query with two of the three replica set nodes down.
    */
   read_prefs = mongoc_read_prefs_new(MONGOC_READ_SECONDARY_PREFERRED);
   cursor = mongoc_collection_find(collection,
                                   MONGOC_QUERY_NONE,
                                   0,
                                   100,
                                   0,
                                   &q,
                                   NULL,
                                   read_prefs);

   /*
    * Try to submit OP_QUERY. Since the read preference is SECONDARY
    * PREFERRED, it should succeed as long as any node is up (r3 still is).
    */
   r = mongoc_cursor_next(cursor, &doc);
   BSON_ASSERT(!r); /* No docs */

   /* No error, slaveOk was set */
   ASSERT_OR_PRINT (!mongoc_cursor_error(cursor, &error), error);

   mongoc_read_prefs_destroy(read_prefs);
   mongoc_cursor_destroy(cursor);
   mongoc_collection_destroy(collection);

   if (use_pool) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy(client);
   }

   bson_destroy(&q);

   ha_node_restart(r1);
   ha_node_restart(r2);
}
Example #12
static void
test1 (void)
{
   mongoc_server_description_t *description;
   mongoc_collection_t *collection;
   mongoc_read_prefs_t *read_prefs;
   mongoc_cursor_t *cursor;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   const bson_t *doc;
   bson_error_t error;
   bool r;
   ha_node_t *replica;
   bson_t q;
   int i;

   bson_init(&q);

   if (use_pool) {
      pool = ha_replica_set_create_client_pool(replica_set);
      client = mongoc_client_pool_pop (pool);
   } else {
      client = ha_replica_set_create_client(replica_set);
   }

   collection = mongoc_client_get_collection(client, "test1", "test1");

   MONGOC_DEBUG("Inserting test documents.");
   insert_test_docs(collection);
   MONGOC_INFO("Test documents inserted.");

   read_prefs = mongoc_read_prefs_new(MONGOC_READ_SECONDARY);

   MONGOC_DEBUG("Sending query to a SECONDARY.");
   cursor = mongoc_collection_find(collection,
                                   MONGOC_QUERY_NONE,
                                   0,
                                   0,
                                   100,
                                   &q,
                                   NULL,
                                   read_prefs);

   BSON_ASSERT(cursor);
   BSON_ASSERT(!cursor->server_id);

   /*
    * Send OP_QUERY to server and get first document back.
    */
   MONGOC_INFO("Sending OP_QUERY.");
   r = mongoc_cursor_next(cursor, &doc);
   BSON_ASSERT(r);
   BSON_ASSERT(cursor->server_id);
   BSON_ASSERT(cursor->sent);
   BSON_ASSERT(!cursor->done);
   BSON_ASSERT(cursor->rpc.reply.n_returned == 100);
   BSON_ASSERT(!cursor->end_of_event);

   /*
    * Make sure we queried a secondary.
    */
   description = mongoc_topology_server_by_id(client->topology,
                                              cursor->server_id,
                                              &error);
   ASSERT_OR_PRINT (description, error);
   BSON_ASSERT (description->type != MONGOC_SERVER_RS_PRIMARY);
   mongoc_server_description_destroy(description);

   /*
    * Exhaust the items in our first OP_REPLY.
    */
   MONGOC_DEBUG("Exhausting OP_REPLY.");
   for (i = 0; i < 98; i++) {
      r = mongoc_cursor_next(cursor, &doc);
      BSON_ASSERT(r);
      BSON_ASSERT(cursor->server_id);
      BSON_ASSERT(!cursor->done);
      BSON_ASSERT(!cursor->end_of_event);
   }

   /*
    * Finish off the last item in this OP_REPLY.
    */
   MONGOC_INFO("Fetching last doc from OP_REPLY.");
   r = mongoc_cursor_next(cursor, &doc);
   BSON_ASSERT(r);
   BSON_ASSERT(cursor->server_id);
   BSON_ASSERT(cursor->sent);
   BSON_ASSERT(!cursor->done);
   BSON_ASSERT(!cursor->end_of_event);

   /*
    * Determine which node we queried by using the server_id to
    * get the cluster information.
    */

   BSON_ASSERT(cursor->server_id);
   replica = get_replica(client, cursor->server_id);

   /*
    * Kill the node we are communicating with.
    */
   MONGOC_INFO("Killing replicaSet node to synthesize failure.");
   ha_node_kill(replica);

   /*
    * Try to fetch the next result set, expect failure.
    */
   MONGOC_DEBUG("Checking for expected failure.");
   r = mongoc_cursor_next(cursor, &doc);
   BSON_ASSERT(!r);

   r = mongoc_cursor_error(cursor, &error);
   BSON_ASSERT(r);
   MONGOC_WARNING("%s", error.message);

   mongoc_cursor_destroy(cursor);
   mongoc_read_prefs_destroy(read_prefs);
   mongoc_collection_destroy(collection);

   if (use_pool) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy(client);
   }
   bson_destroy(&q);

   ha_node_restart(replica);
}
Example #13
static void
test_exhaust_cursor (bool pooled)
{
   mongoc_stream_t *stream;
   mongoc_write_concern_t *wr;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   mongoc_collection_t *collection;
   mongoc_cursor_t *cursor;
   mongoc_cursor_t *cursor2;
   const bson_t *doc;
   bson_t q;
   bson_t b[10];
   bson_t *bptr[10];
   int i;
   bool r;
   uint32_t hint;
   bson_error_t error;
   bson_oid_t oid;
   int64_t timestamp1;

   if (pooled) {
      pool = test_framework_client_pool_new ();
      client = mongoc_client_pool_pop (pool);
   } else {
      client = test_framework_client_new ();
   }
   assert (client);

   collection = get_test_collection (client, "test_exhaust_cursor");
   assert (collection);

   mongoc_collection_drop(collection, &error);

   wr = mongoc_write_concern_new ();
   mongoc_write_concern_set_journal (wr, true);

   /* bulk insert some records to work on */
   {
      bson_init(&q);

      for (i = 0; i < 10; i++) {
         bson_init(&b[i]);
         bson_oid_init(&oid, NULL);
         bson_append_oid(&b[i], "_id", -1, &oid);
         bson_append_int32(&b[i], "n", -1, i % 2);
         bptr[i] = &b[i];
      }

      BEGIN_IGNORE_DEPRECATIONS;
      ASSERT_OR_PRINT (mongoc_collection_insert_bulk (
                          collection, MONGOC_INSERT_NONE,
                          (const bson_t **)bptr, 10, wr, &error),
                       error);
      END_IGNORE_DEPRECATIONS;
   }

   /* create a couple of cursors */
   {
      cursor = mongoc_collection_find (collection, MONGOC_QUERY_EXHAUST, 0, 0, 0, &q,
                                       NULL, NULL);

      cursor2 = mongoc_collection_find (collection, MONGOC_QUERY_NONE, 0, 0, 0, &q,
                                        NULL, NULL);
   }

   /* Read from the exhaust cursor, ensure that we're in exhaust where we
    * should be and ensure that an early destroy properly causes a disconnect
    * */
   {
      r = mongoc_cursor_next (cursor, &doc);
      if (!r) {
         mongoc_cursor_error (cursor, &error);
         fprintf (stderr, "cursor error: %s\n", error.message);
      }
      assert (r);
      assert (doc);
      assert (cursor->in_exhaust);
      assert (client->in_exhaust);

      /* destroy the cursor, make sure a disconnect happened */
      timestamp1 = get_timestamp (client, cursor);
      mongoc_cursor_destroy (cursor);
      assert (! client->in_exhaust);
   }

   /* Grab a new exhaust cursor, then verify that reading from that cursor
    * (putting the client into exhaust), breaks a mid-stream read from a
    * regular cursor */
   {
      cursor = mongoc_collection_find (collection, MONGOC_QUERY_EXHAUST, 0, 0, 0, &q,
                                       NULL, NULL);

      r = mongoc_cursor_next (cursor2, &doc);
      if (!r) {
         mongoc_cursor_error (cursor2, &error);
         fprintf (stderr, "cursor error: %s\n", error.message);
      }
      assert (r);
      assert (doc);
      assert (timestamp1 < get_timestamp (client, cursor2));

      for (i = 0; i < 5; i++) {
         r = mongoc_cursor_next (cursor2, &doc);
         if (!r) {
            mongoc_cursor_error (cursor2, &error);
            fprintf (stderr, "cursor error: %s\n", error.message);
         }
         assert (r);
         assert (doc);
      }

      r = mongoc_cursor_next (cursor, &doc);
      assert (r);
      assert (doc);

      doc = NULL;
      r = mongoc_cursor_next (cursor2, &doc);
      assert (!r);
      assert (!doc);

      mongoc_cursor_error(cursor2, &error);
      assert (error.domain == MONGOC_ERROR_CLIENT);
      assert (error.code == MONGOC_ERROR_CLIENT_IN_EXHAUST);

      mongoc_cursor_destroy (cursor2);
   }

   /* make sure writes fail as well */
   {
      BEGIN_IGNORE_DEPRECATIONS;
      r = mongoc_collection_insert_bulk (collection, MONGOC_INSERT_NONE,
                                         (const bson_t **)bptr, 10, wr, &error);
      END_IGNORE_DEPRECATIONS;

      assert (!r);
      assert (error.domain == MONGOC_ERROR_CLIENT);
      assert (error.code == MONGOC_ERROR_CLIENT_IN_EXHAUST);
   }

   /* we're still in exhaust.
    *
    * 1. check that we can create a new cursor, as long as we don't read from it
    * 2. fully exhaust the exhaust cursor
    * 3. make sure that we don't disconnect at destroy
    * 4. make sure we can read the cursor we made during the exhaust
    */
   {
      cursor2 = mongoc_collection_find (collection, MONGOC_QUERY_NONE, 0, 0, 0, &q,
                                        NULL, NULL);

      stream = (mongoc_stream_t *)mongoc_set_get(client->cluster.nodes, cursor->hint);
      hint = cursor->hint;

      for (i = 1; i < 10; i++) {
         r = mongoc_cursor_next (cursor, &doc);
         assert (r);
         assert (doc);
      }

      r = mongoc_cursor_next (cursor, &doc);
      assert (!r);
      assert (!doc);

      mongoc_cursor_destroy (cursor);

      assert (stream == (mongoc_stream_t *)mongoc_set_get(client->cluster.nodes, hint));

      r = mongoc_cursor_next (cursor2, &doc);
      assert (r);
      assert (doc);
   }

   bson_destroy(&q);
   for (i = 0; i < 10; i++) {
      bson_destroy(&b[i]);
   }

   ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error);

   mongoc_write_concern_destroy (wr);
   mongoc_cursor_destroy (cursor2);
   mongoc_collection_destroy(collection);

   if (pooled) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy (client);
   }
}
Example #14
static int mongodb_log(struct ast_cdr *cdr)
{
	int ret = -1;
    bson_t *doc = NULL;
    mongoc_collection_t *collection = NULL;

	if(dbpool == NULL) {
		ast_log(LOG_ERROR, "unexpected error, no connection pool\n");
		return ret;
	}

	mongoc_client_t *dbclient = mongoc_client_pool_pop(dbpool);
	if(dbclient == NULL) {
		ast_log(LOG_ERROR, "unexpected error, no client allocated\n");
		return ret;
	}

	do {
	    bson_error_t error;

	    doc = bson_new();
		if(doc == NULL) {
			ast_log(LOG_ERROR, "cannot make a document\n");
			break;
		}
	    collection = mongoc_client_get_collection(dbclient, dbname, dbcollection);
		if(collection == NULL) {
			ast_log(LOG_ERROR, "cannot get such a collection, %s, %s\n", dbname, dbcollection);
			break;
		}
		BSON_APPEND_UTF8(doc, "clid", cdr->clid);
		BSON_APPEND_UTF8(doc, "src", cdr->src);
		BSON_APPEND_UTF8(doc, "dst", cdr->dst);
		BSON_APPEND_UTF8(doc, "dcontext", cdr->dcontext);
		BSON_APPEND_UTF8(doc, "channel", cdr->channel);
		BSON_APPEND_UTF8(doc, "dstchannel", cdr->dstchannel);
		BSON_APPEND_UTF8(doc, "lastapp", cdr->lastapp);
		BSON_APPEND_UTF8(doc, "lastdata", cdr->lastdata);
		BSON_APPEND_UTF8(doc, "disposition", ast_cdr_disp2str(cdr->disposition));
		BSON_APPEND_UTF8(doc, "amaflags", ast_channel_amaflags2string(cdr->amaflags));
		BSON_APPEND_UTF8(doc, "accountcode", cdr->accountcode);
		BSON_APPEND_UTF8(doc, "uniqueid", cdr->uniqueid);
		BSON_APPEND_UTF8(doc, "userfield", cdr->userfield);
		BSON_APPEND_UTF8(doc, "peeraccount", cdr->peeraccount);
		BSON_APPEND_UTF8(doc, "linkedid", cdr->linkedid);
		BSON_APPEND_INT32(doc, "duration", cdr->duration);
		BSON_APPEND_INT32(doc, "billsec", cdr->billsec);
		BSON_APPEND_INT32(doc, "sequence", cdr->sequence);
		BSON_APPEND_TIMEVAL(doc, "start", &cdr->start);
		BSON_APPEND_TIMEVAL(doc, "answer", &cdr->answer);
		BSON_APPEND_TIMEVAL(doc, "end", &cdr->end);
		if (serverid)
			BSON_APPEND_OID(doc, SERVERID, serverid);

		if(!mongoc_collection_insert(collection, MONGOC_INSERT_NONE, doc, NULL, &error))
			ast_log(LOG_ERROR, "insertion failed, %s\n", error.message);

		ret = 0; // success
	} while(0);

	if (collection)
	    mongoc_collection_destroy(collection);
	if (doc)
	    bson_destroy(doc);
	mongoc_client_pool_push(dbpool, dbclient);
	return ret;
}
static void
test_mongoc_metadata_append_success (void)
{
   mock_server_t *server;
   mongoc_uri_t *uri;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool;
   request_t *request;
   const bson_t *request_doc;
   bson_iter_t iter;
   bson_iter_t md_iter;
   bson_iter_t inner_iter;
   const char *val;

   const char *driver_name = "php driver";
   const char *driver_version = "version abc";
   const char *platform = "./configure -nottoomanyflags";

   char big_string [METADATA_MAX_SIZE];

   memset (big_string, 'a', METADATA_MAX_SIZE - 1);
   big_string [METADATA_MAX_SIZE - 1] = '\0';

   _reset_metadata ();
   /* Make sure setting the metadata works */
   ASSERT (mongoc_metadata_append (driver_name, driver_version, platform));

   server = mock_server_new ();
   mock_server_run (server);
   uri = mongoc_uri_copy (mock_server_get_uri (server));
   mongoc_uri_set_option_as_int32 (uri, "heartbeatFrequencyMS", 500);
   pool = mongoc_client_pool_new (uri);

   /* Force topology scanner to start */
   client = mongoc_client_pool_pop (pool);

   request = mock_server_receives_ismaster (server);
   ASSERT (request);
   request_doc = request_get_doc (request, 0);
   ASSERT (request_doc);
   ASSERT (bson_has_field (request_doc, "isMaster"));
   ASSERT (bson_has_field (request_doc, METADATA_FIELD));

   ASSERT (bson_iter_init_find (&iter, request_doc, METADATA_FIELD));
   ASSERT (bson_iter_recurse (&iter, &md_iter));

   /* Make sure driver.name and driver.version and platform are all right */
   ASSERT (bson_iter_find (&md_iter, "driver"));
   ASSERT (BSON_ITER_HOLDS_DOCUMENT (&md_iter));
   ASSERT (bson_iter_recurse (&md_iter, &inner_iter));
   ASSERT (bson_iter_find (&inner_iter, "name"));
   ASSERT (BSON_ITER_HOLDS_UTF8 (&inner_iter));
   val = bson_iter_utf8 (&inner_iter, NULL);
   ASSERT (val);
   ASSERT (strstr (val, driver_name) != NULL);

   ASSERT (bson_iter_find (&inner_iter, "version"));
   ASSERT (BSON_ITER_HOLDS_UTF8 (&inner_iter));
   val = bson_iter_utf8 (&inner_iter, NULL);
   ASSERT (val);
   ASSERT (strstr (val, driver_version));

   /* Check os type not empty */
   ASSERT (bson_iter_find (&md_iter, "os"));
   ASSERT (BSON_ITER_HOLDS_DOCUMENT (&md_iter));
   ASSERT (bson_iter_recurse (&md_iter, &inner_iter));

   ASSERT (bson_iter_find (&inner_iter, "type"));
   ASSERT (BSON_ITER_HOLDS_UTF8 (&inner_iter));
   val = bson_iter_utf8 (&inner_iter, NULL);
   ASSERT (val);
   ASSERT (strlen (val) > 0);

   /* Not checking os_name, as the spec says it can be NULL. */

   /* Check platform field ok */
   ASSERT (bson_iter_find (&md_iter, "platform"));
   ASSERT (BSON_ITER_HOLDS_UTF8 (&md_iter));
   val = bson_iter_utf8 (&md_iter, NULL);
   ASSERT (val);
   ASSERT (strstr (val, platform) != NULL);

   mock_server_replies_simple (request,
                               "{'ok': 1, 'ismaster': true}");
   request_destroy (request);

   /* Cleanup */
   mongoc_client_pool_push (pool, client);
   mongoc_client_pool_destroy (pool);
   mongoc_uri_destroy (uri);
   mock_server_destroy (server);

   _reset_metadata ();
}
/* Test the case where we can't prevent the metadata doc being too big
 * and so we just don't send it */
static void
test_mongoc_metadata_cannot_send (void)
{
   mock_server_t *server;
   mongoc_uri_t *uri;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool;
   request_t *request;
   const char *const server_reply = "{'ok': 1, 'ismaster': true}";
   const bson_t *request_doc;
   char big_string[METADATA_MAX_SIZE];
   mongoc_metadata_t *md;

   _reset_metadata ();

   /* Mess with our global metadata struct so the metadata doc will be
    * way too big */
   memset (big_string, 'a', METADATA_MAX_SIZE - 1);
   big_string[METADATA_MAX_SIZE - 1] = '\0';

   md = _mongoc_metadata_get ();
   bson_free (md->os_name);
   md->os_name = bson_strdup (big_string);

   server = mock_server_new ();
   mock_server_run (server);
   uri = mongoc_uri_copy (mock_server_get_uri (server));
   mongoc_uri_set_option_as_int32 (uri, "heartbeatFrequencyMS", 500);
   pool = mongoc_client_pool_new (uri);

   /* Pop a client to trigger the topology scanner */
   client = mongoc_client_pool_pop (pool);
   request = mock_server_receives_ismaster (server);

   /* Make sure the isMaster request DOESN'T have a metadata field: */
   ASSERT (request);
   request_doc = request_get_doc (request, 0);
   ASSERT (request_doc);
   ASSERT (bson_has_field (request_doc, "isMaster"));
   ASSERT (!bson_has_field (request_doc, METADATA_FIELD));

   mock_server_replies_simple (request, server_reply);
   request_destroy (request);

   /* Cause failure on client side */
   request = mock_server_receives_ismaster (server);
   ASSERT (request);
   mock_server_hangs_up (request);
   request_destroy (request);

   /* Make sure the isMaster request still DOESN'T have a metadata field
    * on subsequent heartbeats. */
   request = mock_server_receives_ismaster (server);
   ASSERT (request);
   request_doc = request_get_doc (request, 0);
   ASSERT (request_doc);
   ASSERT (bson_has_field (request_doc, "isMaster"));
   ASSERT (!bson_has_field (request_doc, METADATA_FIELD));

   mock_server_replies_simple (request, server_reply);
   request_destroy (request);

   /* cleanup */
   mongoc_client_pool_push (pool, client);

   mongoc_client_pool_destroy (pool);
   mongoc_uri_destroy (uri);
   mock_server_destroy (server);

   /* Reset again so the next tests don't have a metadata doc which
    * is too big */
   _reset_metadata ();
}