/** run as a child thread by test_mongoc_tls_hangup
 *
 * It:
 *    1. spins up
 *    2. waits on a condvar until the server is up
 *    3. connects to the server's port
 *    4. writes a byte
 *    5. confirms that the server hangs up promptly
 *    6. shuts down
 */
static void *
ssl_hangup_client (void *ptr)
{
   ssl_test_data_t *data = (ssl_test_data_t *)ptr;
   mongoc_stream_t *sock_stream;
   mongoc_stream_t *ssl_stream;
   mongoc_socket_t *conn_sock;
   char buf = 'b';
   ssize_t r;
   mongoc_iovec_t riov;
   mongoc_iovec_t wiov;
   struct sockaddr_in server_addr = { 0 };
   int64_t start_time;
   bson_error_t error;

   conn_sock = mongoc_socket_new (AF_INET, SOCK_STREAM, 0);
   assert (conn_sock);

   mongoc_mutex_lock (&data->cond_mutex);
   while (!data->server_port) {
      mongoc_cond_wait (&data->cond, &data->cond_mutex);
   }
   mongoc_mutex_unlock (&data->cond_mutex);

   server_addr.sin_family = AF_INET;
   server_addr.sin_port = htons (data->server_port);
   server_addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK);

   r = mongoc_socket_connect (conn_sock, (struct sockaddr *)&server_addr,
                              sizeof (server_addr), -1);
   assert (r == 0);

   sock_stream = mongoc_stream_socket_new (conn_sock);
   assert (sock_stream);

   ssl_stream = mongoc_stream_tls_new (sock_stream, data->client, 1);
   assert (ssl_stream);

   r = mongoc_stream_tls_handshake_block (ssl_stream, data->host, TIMEOUT, &error);
   assert (r);

   wiov.iov_base = (void *)&buf;
   wiov.iov_len = 1;
   r = mongoc_stream_writev (ssl_stream, &wiov, 1, TIMEOUT);
   assert (r == 1);

   riov.iov_base = (void *)&buf;
   riov.iov_len = 1;

   /* we should notice promptly that the server hangs up */
   start_time = bson_get_monotonic_time ();
   r = mongoc_stream_readv (ssl_stream, &riov, 1, 1, TIMEOUT);
   /* time is in microseconds */
   assert (bson_get_monotonic_time () - start_time < 1000 * 1000);
   assert (r == -1);
   mongoc_stream_destroy (ssl_stream);
   data->client_result->result = SSL_TEST_SUCCESS;
   return NULL;
}
static void
test_get_monotonic_time (void)
{
   bson_int64_t t;
   bson_int64_t t2;

   t = bson_get_monotonic_time();
   t2 = bson_get_monotonic_time();
   assert(t);
   assert(t2);
   assert_cmpint(t, <=, t2);
}
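The test above relies on the monotonic clock being expressed in microseconds. As a minimal, hypothetical sketch (not part of the driver), this is the elapsed-time pattern the rest of these examples build on:

#include <bson.h>
#include <inttypes.h>
#include <stdio.h>

static void
measure_elapsed_example (void)
{
   int64_t start;
   int64_t elapsed_usec;

   start = bson_get_monotonic_time (); /* microseconds since an arbitrary epoch */

   /* ... the work being timed ... */

   elapsed_usec = bson_get_monotonic_time () - start;
   printf ("elapsed: %" PRId64 " ms\n", elapsed_usec / 1000);
}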
static ssize_t
_mongoc_stream_tls_secure_transport_write (mongoc_stream_t *stream,
                                           char *buf,
                                           size_t buf_len)
{
   OSStatus status;
   mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream;
   mongoc_stream_tls_secure_transport_t *secure_transport =
      (mongoc_stream_tls_secure_transport_t *) tls->ctx;
   ssize_t write_ret;
   int64_t now;
   int64_t expire = 0;

   ENTRY;
   BSON_ASSERT (secure_transport);

   if (tls->timeout_msec >= 0) {
      expire = bson_get_monotonic_time () + (tls->timeout_msec * 1000UL);
   }

   status = SSLWrite (
      secure_transport->ssl_ctx_ref, buf, buf_len, (size_t *) &write_ret);

   switch (status) {
   case errSSLWouldBlock:
   case noErr:
      break;

   case errSSLClosedAbort:
      errno = ECONNRESET;
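      /* fall through: an aborted connection also returns -1 */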

   default:
      RETURN (-1);
   }

   if (expire) {
      now = bson_get_monotonic_time ();

      if ((expire - now) < 0) {
         if (write_ret < buf_len) {
            mongoc_counter_streams_timeout_inc ();
         }

         tls->timeout_msec = 0;
      } else {
         tls->timeout_msec = (expire - now) / 1000L;
      }
   }

   RETURN (write_ret);
}
/** run as a child thread by test_mongoc_tls_handshake_stall
 *
 * It:
 *    1. spins up
 *    2. waits on a condvar until the server is up
 *    3. connects to the server's port
 *    4. attempts handshake
 *    5. confirms that it times out
 *    6. shuts down
 */
static void *
handshake_stall_client (void *ptr)
{
   ssl_test_data_t *data = (ssl_test_data_t *)ptr;
   char *uri_str;
   mongoc_client_t *client;
   bson_t reply;
   bson_error_t error;
   int64_t connect_timeout_ms = data->handshake_stall_ms - 100;
   int64_t duration_ms;

   int64_t start_time;

   mongoc_mutex_lock (&data->cond_mutex);
   while (!data->server_port) {
      mongoc_cond_wait (&data->cond, &data->cond_mutex);
   }
   mongoc_mutex_unlock (&data->cond_mutex);

   uri_str = bson_strdup_printf (
      "mongodb://localhost:%u/?ssl=true&serverselectiontimeoutms=200&connecttimeoutms=%" PRId64,
      data->server_port, connect_timeout_ms);

   client = mongoc_client_new (uri_str);

   /* we should time out after about connect_timeout_ms */
   start_time = bson_get_monotonic_time ();
   mongoc_client_get_server_status (client,
                                    NULL,
                                    &reply,
                                    &error);

   /* time is in microseconds */
   duration_ms = (bson_get_monotonic_time () - start_time) / 1000;

   if (llabs (duration_ms - connect_timeout_ms) > 100) {
      fprintf (stderr,
               "expected timeout after about %" PRId64 "ms, not %" PRId64 "ms\n",
               connect_timeout_ms,
               duration_ms);
      abort ();
   }

   data->client_result->result = SSL_TEST_SUCCESS;

   bson_destroy (&reply);
   mongoc_client_destroy (client);
   bson_free (uri_str);

   return NULL;
}
static ssize_t
_mongoc_stream_tls_write (mongoc_stream_tls_t *tls,
                          char                *buf,
                          size_t               buf_len)
{
   ssize_t ret;

   int64_t now;
   int64_t expire = 0;

   BSON_ASSERT (tls);
   BSON_ASSERT (buf);
   BSON_ASSERT (buf_len);

   if (tls->timeout_msec >= 0) {
      expire = bson_get_monotonic_time () + (tls->timeout_msec * 1000UL);
   }

   ret = BIO_write (tls->bio, buf, buf_len);

   if (ret < 0) {
      return ret;
   }

   if (expire) {
      now = bson_get_monotonic_time ();

      if ((expire - now) < 0) {
         if (ret < buf_len) {
            mongoc_counter_streams_timeout_inc();
#ifdef _WIN32
            errno = WSAETIMEDOUT;
#else
            errno = ETIMEDOUT;
#endif
         }

         tls->timeout_msec = 0;
      } else {
         tls->timeout_msec = (expire - now) / 1000L;
      }
   }

   return ret;
}
static BSON_INLINE int64_t
get_expiration (int32_t timeout_msec)
{
   if (timeout_msec < 0) {
      return -1;
   } else if (timeout_msec == 0) {
      return 0;
   } else {
      return (bson_get_monotonic_time () + ((int64_t) timeout_msec * 1000L));
   }
}
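Several of the stream functions here turn a relative timeout into an absolute expiration and later convert it back into the milliseconds still remaining. A hedged sketch of that conversion, following the same -1 / 0 conventions as get_expiration() above:

#include <bson.h>

static int32_t
remaining_msec_example (int64_t expire_at)
{
   int64_t now;

   if (expire_at < 0) {
      return -1; /* no timeout: wait forever */
   } else if (expire_at == 0) {
      return 0; /* non-blocking */
   }

   now = bson_get_monotonic_time ();
   if (expire_at <= now) {
      return 0; /* deadline already passed */
   }

   return (int32_t) ((expire_at - now) / 1000);
}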
double
mock_server_get_uptime_sec (mock_server_t *server)
{
   double uptime;

   mongoc_mutex_lock (&server->mutex);
   uptime = (bson_get_monotonic_time () - server->start_time) / 1e6;
   mongoc_mutex_unlock (&server->mutex);

   return uptime;
}
bool
future_wait (future_t *future)
{
   int64_t deadline = bson_get_monotonic_time () + get_future_timeout_ms ();
   bool resolved;

   mongoc_mutex_lock (&future->mutex);
   while (!future->resolved && bson_get_monotonic_time () <= deadline) {
      mongoc_cond_timedwait (&future->cond,
                             &future->mutex,
                             get_future_timeout_ms ());
   }
   resolved = future->resolved;
   mongoc_mutex_unlock (&future->mutex);

   if (resolved) {
      future->awaited = true;

      /* free memory */
      mongoc_thread_join (future->thread);
   }

   return resolved;
}
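future_wait() illustrates a deadline-bounded condition wait. A hedged sketch of the same pattern follows, reusing the driver's mongoc_mutex_t / mongoc_cond_t wrappers exactly as the example above does; the hypothetical flag and the 10ms re-check interval are arbitrary choices for illustration:

static bool
wait_for_flag_example (bool *flag,
                       mongoc_mutex_t *mutex,
                       mongoc_cond_t *cond,
                       int64_t timeout_ms)
{
   int64_t deadline;
   bool done;

   deadline = bson_get_monotonic_time () + timeout_ms * 1000;

   mongoc_mutex_lock (mutex);
   while (!*flag && bson_get_monotonic_time () < deadline) {
      /* wake at least every 10ms so the deadline is re-checked */
      mongoc_cond_timedwait (cond, mutex, 10);
   }
   done = *flag;
   mongoc_mutex_unlock (mutex);

   return done;
}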
static void
test_cond_wait (void)
{
   int64_t start, duration_usec;
   mongoc_mutex_t mutex;
   mongoc_cond_t cond;

   mongoc_mutex_init (&mutex);
   mongoc_cond_init (&cond);

   mongoc_mutex_lock (&mutex);
   start = bson_get_monotonic_time ();
   mongoc_cond_timedwait (&cond, &mutex, 100);
   duration_usec = bson_get_monotonic_time () - start;
   mongoc_mutex_unlock (&mutex);

   if (!((50 * 1000 < duration_usec) && (150 * 1000 > duration_usec))) {
      fprintf (
         stderr, "expected to wait 100ms, waited %" PRId64 "\n", duration_usec);
   }

   mongoc_cond_destroy (&cond);
   mongoc_mutex_destroy (&mutex);
}
mongoc_async_cmd_result_t
_mongoc_async_cmd_phase_initiate (mongoc_async_cmd_t *acmd)
{
   acmd->stream = acmd->initiator (acmd);
   if (!acmd->stream) {
      return MONGOC_ASYNC_CMD_ERROR;
   }
   /* reset the connect started time after connection starts. */
   acmd->connect_started = bson_get_monotonic_time ();
   if (acmd->setup) {
      acmd->state = MONGOC_ASYNC_CMD_SETUP;
   } else {
      acmd->state = MONGOC_ASYNC_CMD_SEND;
   }
   return MONGOC_ASYNC_CMD_IN_PROGRESS;
}
mongoc_async_cmd_t *
mongoc_async_cmd_new (mongoc_async_t *async,
                      mongoc_stream_t *stream,
                      bool is_setup_done,
                      struct addrinfo *dns_result,
                      mongoc_async_cmd_initiate_t initiator,
                      int64_t initiate_delay_ms,
                      mongoc_async_cmd_setup_t setup,
                      void *setup_ctx,
                      const char *dbname,
                      const bson_t *cmd,
                      mongoc_async_cmd_cb_t cb,
                      void *cb_data,
                      int64_t timeout_msec)
{
   mongoc_async_cmd_t *acmd;

   BSON_ASSERT (cmd);
   BSON_ASSERT (dbname);

   acmd = (mongoc_async_cmd_t *) bson_malloc0 (sizeof (*acmd));
   acmd->async = async;
   acmd->dns_result = dns_result;
   acmd->timeout_msec = timeout_msec;
   acmd->stream = stream;
   acmd->initiator = initiator;
   acmd->initiate_delay_ms = initiate_delay_ms;
   acmd->setup = setup;
   acmd->setup_ctx = setup_ctx;
   acmd->cb = cb;
   acmd->data = cb_data;
   acmd->connect_started = bson_get_monotonic_time ();
   bson_copy_to (cmd, &acmd->cmd);

   _mongoc_array_init (&acmd->array, sizeof (mongoc_iovec_t));
   _mongoc_buffer_init (&acmd->buffer, NULL, 0, NULL, NULL);

   _mongoc_async_cmd_init_send (acmd, dbname);

   _mongoc_async_cmd_state_start (acmd, is_setup_done);

   async->ncmds++;
   DL_APPEND (async->cmds, acmd);

   return acmd;
}
mock_server_t *
mock_server_new ()
{
   mock_server_t *server = (mock_server_t *)bson_malloc0 (sizeof (mock_server_t));

   server->request_timeout_msec = get_future_timeout_ms ();
   _mongoc_array_init (&server->autoresponders,
                       sizeof (autoresponder_handle_t));
   _mongoc_array_init (&server->worker_threads,
                       sizeof (mongoc_thread_t));
   mongoc_cond_init (&server->cond);
   mongoc_mutex_init (&server->mutex);
   server->q = q_new ();
   server->start_time = bson_get_monotonic_time ();

   return server;
}
mock_server_t *
mock_server_new ()
{
   mock_server_t *server = (mock_server_t *)bson_malloc0 (sizeof (mock_server_t));

   server->request_timeout_msec = 10 * 1000;
   _mongoc_array_init (&server->autoresponders,
                       sizeof (autoresponder_handle_t));
   _mongoc_array_init (&server->worker_threads,
                       sizeof (mongoc_thread_t));
   mongoc_cond_init (&server->cond);
   mongoc_mutex_init (&server->mutex);
   server->q = q_new ();
   server->start_time = bson_get_monotonic_time ();

   if (test_framework_getenv_bool ("MONGOC_TEST_SERVER_VERBOSE")) {
      server->verbose = true;
   }

   return server;
}
bool
mongoc_async_cmd_run (mongoc_async_cmd_t *acmd)
{
   mongoc_async_cmd_result_t result;
   int64_t duration_usec;
   _mongoc_async_cmd_phase_t phase_callback;

   BSON_ASSERT (acmd);

   /* if we have successfully connected to the node, call the callback. */
   if (acmd->state == MONGOC_ASYNC_CMD_SEND) {
      acmd->cb (acmd, MONGOC_ASYNC_CMD_CONNECTED, NULL, 0);
   }

   phase_callback = gMongocCMDPhases[acmd->state];
   if (phase_callback) {
      result = phase_callback (acmd);
   } else {
      result = MONGOC_ASYNC_CMD_ERROR;
   }

   if (result == MONGOC_ASYNC_CMD_IN_PROGRESS) {
      return true;
   }

   duration_usec = bson_get_monotonic_time () - acmd->cmd_started;

   if (result == MONGOC_ASYNC_CMD_SUCCESS) {
      acmd->cb (acmd, result, &acmd->reply, duration_usec);
   } else {
      /* we're in ERROR, TIMEOUT, or CANCELED */
      acmd->cb (acmd, result, NULL, duration_usec);
   }

   mongoc_async_cmd_destroy (acmd);
   return false;
}
static ssize_t
mongoc_stream_unix_writev (mongoc_stream_t *stream,
                           struct iovec    *iov,
                           size_t           iovcnt,
                           bson_uint32_t    timeout_msec)
{
   mongoc_stream_unix_t *file = (mongoc_stream_unix_t *)stream;
   struct msghdr msg;
   struct pollfd fds;
   bson_int64_t now;
   bson_int64_t expire;
   size_t cur = 0;
   ssize_t written;
   ssize_t ret = 0;
   int flags = 0;
   int timeout;

   bson_return_val_if_fail(stream, -1);
   bson_return_val_if_fail(iov, -1);
   bson_return_val_if_fail(iovcnt, -1);

   /*
    * NOTE: See notes from mongoc_stream_unix_readv(); they apply here too.
    */

   if (file->fd == -1) {
      errno = EBADF;
      return -1;
   }

   if (!timeout_msec) {
      timeout_msec = MONGOC_DEFAULT_TIMEOUT_MSEC;
   }

   /*
    * We require a monotonic clock for determining our timeout interval. This
    * is so that we are resilient to changes in the underlying wall clock,
    * such as during timezone changes. The monotonic clock is in microseconds
    * since an unknown epoch (but often system startup).
    */
   expire = bson_get_monotonic_time() + (timeout_msec * 1000UL);

   /*
    * Prepare our pollfd so that we are notified of errors and peer hang-up
    * in addition to writability.
    */
   fds.fd = file->fd;
   fds.events = (POLLOUT | POLLERR | POLLHUP);

   for (;;) {
      /*
       * Build our message for sendmsg(), taking into account that we may
       * have already done a short write and must advance the iovec.
       */
      msg.msg_name = NULL;
      msg.msg_namelen = 0;
      msg.msg_iov = iov + cur;
      msg.msg_iovlen = iovcnt - cur;
      msg.msg_control = NULL;
      msg.msg_controllen = 0;
      msg.msg_flags = 0;

      BSON_ASSERT(msg.msg_iov->iov_len);
      BSON_ASSERT(cur < iovcnt);

      fds.revents = 0;

      /*
       * Determine number of milliseconds until timeout expires.
       */
      now = bson_get_monotonic_time();
      timeout = MAX(0, (expire - now) / 1000UL);

      /*
       * Block on poll() until the socket is writable or the timeout expires.
       * Upon timeout, synthesize an errno of ETIMEDOUT.
       */
      errno = 0;
      fds.revents = 0;
      written = poll(&fds, 1, timeout);
      if (written == -1) {
         return -1;
      } else if (written == 0) {
         errno = ETIMEDOUT;
         return -1;
      }

      /*
       * Perform sendmsg() on socket to send next chunk of data. If it turns
       * out this is not a socket, fall back to writev(). This should only
       * happen during unit tests.
       */
      errno = 0;
      written = TEMP_FAILURE_RETRY(sendmsg(file->fd, &msg, flags));
      if (written == -1 && errno == ENOTSOCK) {
         written = TEMP_FAILURE_RETRY(writev(file->fd, iov + cur, iovcnt - cur));
         if (!written) {
            return ret;
         }
      }

      /*
       * If our sendmsg() failed, there is nothing more we can do.
       */
      if (written == -1) {
         return written;
      } else {
         ret += written;
      }

      BSON_ASSERT(cur < iovcnt);

      /*
       * Advance the iovecs in case we did a short write. Break out if we
       * have written all of our expected data.
       */
      while ((cur < iovcnt) && (written >= iov[cur].iov_len)) {
         BSON_ASSERT(iov[cur].iov_len);
         written -= iov[cur++].iov_len;
         BSON_ASSERT(cur <= iovcnt);
      }
      if (cur == iovcnt) {
         break;
      }
      iov[cur].iov_base = ((bson_uint8_t *)iov[cur].iov_base) + written;
      iov[cur].iov_len -= written;

      BSON_ASSERT(iovcnt - cur);
      BSON_ASSERT(iov[cur].iov_len);
   }

   return ret;
}
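mongoc_stream_unix_writev() above and _mongoc_async_cmd_phase_send() below both have to resume after a short write. A hedged, self-contained sketch of that iovec bookkeeping; plain struct iovec is used for illustration, and the driver's mongoc_iovec_t is laid out the same way on POSIX:

#include <stddef.h>
#include <sys/uio.h>

static size_t
advance_iovec_example (struct iovec *iov, size_t iovcnt, size_t written)
{
   size_t cur = 0;

   /* skip entries that were written in full */
   while (cur < iovcnt && written >= iov[cur].iov_len) {
      written -= iov[cur].iov_len;
      cur++;
   }

   /* advance the partially written entry, if any */
   if (cur < iovcnt && written) {
      iov[cur].iov_base = (char *) iov[cur].iov_base + written;
      iov[cur].iov_len -= written;
   }

   return cur; /* index of the first entry with data left to send */
}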
mongoc_async_cmd_result_t
_mongoc_async_cmd_phase_send (mongoc_async_cmd_t *acmd)
{
   size_t total_bytes = 0;
   size_t offset;
   ssize_t bytes;
   int i;
   /* if a continued write, then iovec will be set to a temporary copy */
   bool used_temp_iovec = false;
   mongoc_iovec_t *iovec = acmd->iovec;
   size_t niovec = acmd->niovec;

   for (i = 0; i < acmd->niovec; i++) {
      total_bytes += acmd->iovec[i].iov_len;
   }

   if (acmd->bytes_written > 0) {
      BSON_ASSERT (acmd->bytes_written < total_bytes);
      /* if bytes have been written before, compute the offset in the next
       * iovec entry to be written. */
      offset = acmd->bytes_written;

      /* subtract the lengths of all iovec entries written so far. */
      for (i = 0; i < acmd->niovec; i++) {
         if (offset < acmd->iovec[i].iov_len) {
            break;
         }
         offset -= acmd->iovec[i].iov_len;
      }

      BSON_ASSERT (i < acmd->niovec);

      /* create a new iovec with the remaining data to be written. */
      niovec = acmd->niovec - i;
      iovec = bson_malloc (niovec * sizeof (mongoc_iovec_t));
      memcpy (iovec, acmd->iovec + i, niovec * sizeof (mongoc_iovec_t));
      iovec[0].iov_base = (char *) iovec[0].iov_base + offset;
      iovec[0].iov_len -= offset;
      used_temp_iovec = true;
   }

   bytes = mongoc_stream_writev (acmd->stream, iovec, niovec, 0);

   if (used_temp_iovec) {
      bson_free (iovec);
   }

   if (bytes <= 0 && mongoc_stream_should_retry (acmd->stream)) {
      return MONGOC_ASYNC_CMD_IN_PROGRESS;
   }

   if (bytes < 0) {
      bson_set_error (&acmd->error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "Failed to write rpc bytes.");
      return MONGOC_ASYNC_CMD_ERROR;
   }

   acmd->bytes_written += bytes;

   if (acmd->bytes_written < total_bytes) {
      return MONGOC_ASYNC_CMD_IN_PROGRESS;
   }

   acmd->state = MONGOC_ASYNC_CMD_RECV_LEN;
   acmd->bytes_to_read = 4;
   acmd->events = POLLIN;

   acmd->cmd_started = bson_get_monotonic_time ();

   return MONGOC_ASYNC_CMD_IN_PROGRESS;
}
static void *
main_thread (void *data)
{
   mock_server_t *server = (mock_server_t *)data;
   mongoc_socket_t *client_sock;
   bool stopped;
   uint16_t port;
   mongoc_stream_t *client_stream;
   worker_closure_t *closure;
   mongoc_thread_t thread;
   mongoc_array_t worker_threads;
   size_t i;

   mongoc_mutex_lock (&server->mutex);
   server->running = true;
   mongoc_cond_signal (&server->cond);
   mongoc_mutex_unlock (&server->mutex);

   for (; ;) {
      client_sock = mongoc_socket_accept_ex (
            server->sock,
            bson_get_monotonic_time () + TIMEOUT,
            &port);

      mongoc_mutex_lock (&server->mutex);
      stopped = server->stopped;
      mongoc_mutex_unlock (&server->mutex);

      if (stopped) {
         break;
      }

      if (client_sock) {
         if (mock_server_get_verbose (server)) {
            printf ("%5.2f  %hu -> server port %hu (connected)\n",
                    mock_server_get_uptime_sec (server),
                    port, server->port);
            fflush (stdout);
         }

         client_stream = mongoc_stream_socket_new (client_sock);

#ifdef MONGOC_ENABLE_SSL
         mongoc_mutex_lock (&server->mutex);
         if (server->ssl) {
            server->ssl_opts.weak_cert_validation = 1;
            client_stream = mongoc_stream_tls_new (client_stream,
                                                   &server->ssl_opts, 0);
            if (!client_stream) {
               mongoc_mutex_unlock (&server->mutex);
               perror ("Failed to attach tls stream");
               break;
            }
         }
         mongoc_mutex_unlock (&server->mutex);
#endif
         closure = (worker_closure_t *)bson_malloc (sizeof *closure);
         closure->server = server;
         closure->client_stream = client_stream;
         closure->port = port;

         mongoc_thread_create (&thread, worker_thread, closure);

         mongoc_mutex_lock (&server->mutex);
         _mongoc_array_append_val (&server->worker_threads, thread);
         mongoc_mutex_unlock (&server->mutex);
      }
   }

   /* copy list of worker threads and join them all */
   _mongoc_array_init (&worker_threads, sizeof (mongoc_thread_t));
   mongoc_mutex_lock (&server->mutex);
   _mongoc_array_copy (&worker_threads, &server->worker_threads);
   mongoc_mutex_unlock (&server->mutex);

   for (i = 0; i < worker_threads.len; i++) {
      mongoc_thread_join (
         _mongoc_array_index (&worker_threads, mongoc_thread_t, i));
   }

   _mongoc_array_destroy (&worker_threads);

   mongoc_mutex_lock (&server->mutex);
   server->running = false;
   mongoc_mutex_unlock (&server->mutex);

   return NULL;
}
void
mock_server_destroy (mock_server_t *server)
{
   size_t i;
   autoresponder_handle_t *handle;
   int64_t deadline = bson_get_monotonic_time () + 10 * 1000 * 1000;
   request_t *request;

   mongoc_mutex_lock (&server->mutex);
   if (server->running) {
      server->stopped = true;
   }
   mongoc_mutex_unlock (&server->mutex);

   while (bson_get_monotonic_time () <= deadline) {
      /* wait up to 10 seconds for the main thread to stop */
      mongoc_mutex_lock (&server->mutex);
      if (!server->running) {
         mongoc_mutex_unlock (&server->mutex);
         break;
      }

      mongoc_mutex_unlock (&server->mutex);
      _mongoc_usleep (1000);
   }

   mongoc_mutex_lock (&server->mutex);
   if (server->running) {
      fprintf (stderr, "server still running after timeout\n");
      fflush (stderr);
      abort ();
   }

   mongoc_mutex_unlock (&server->mutex);
   mongoc_thread_join (server->main_thread);

   _mongoc_array_destroy (&server->worker_threads);

   for (i = 0; i < server->autoresponders.len; i++) {
      handle = &_mongoc_array_index (&server->autoresponders,
                                     autoresponder_handle_t,
                                     i);

      autoresponder_handle_destroy (handle);
   }

   _mongoc_array_destroy (&server->autoresponders);

   mongoc_cond_destroy (&server->cond);
   mongoc_mutex_unlock (&server->mutex);
   mongoc_mutex_destroy (&server->mutex);
   mongoc_socket_destroy (server->sock);
   bson_free (server->uri_str);
   mongoc_uri_destroy (server->uri);

   while ((request = (request_t *) q_get_nowait (server->q))) {
      request_destroy (request);
   }

   q_destroy (server->q);
   bson_free (server);
}
void
_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   mongoc_iovec_t *iov;
   mongoc_rpc_t rpc;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   uint32_t n_docs_in_batch;
   uint32_t request_id = 0;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;
   int data_offset = 0;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 +
                      strlen (collection) + 1);

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);
   while ((bson = bson_reader_read (reader, &eof))) {
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx <= command->n_documents);

      if (bson->len > max_bson_obj_size) {
         /* document is too large */
         _mongoc_write_command_too_large_error (
            error, idx, bson->len, max_bson_obj_size);

         data_offset += bson->len;

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - bson->len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
         iov[n_docs_in_batch].iov_len = bson->len;
         size += bson->len;
         n_docs_in_batch++;
         data_offset += bson->len;
      }

      idx++;
   }
   bson_reader_destroy (reader);

   if (n_docs_in_batch) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags =
         ((command->flags.ordered) ? MONGOC_INSERT_NONE
                                   : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      _mongoc_monitor_legacy_write (
         client, command, database, collection, server_stream, request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }

cleanup:

   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
bool
mongoc_stream_tls_handshake_block (mongoc_stream_t *stream,
                                   const char      *host,
                                   int32_t          timeout_msec,
                                   bson_error_t    *error)
{
   int events;
   ssize_t ret = 0;
   mongoc_stream_poll_t poller;
   int64_t now;
   int64_t expire = 0;

   if (timeout_msec >= 0) {
      expire = bson_get_monotonic_time () + (timeout_msec * 1000UL);
   }

   /*
    * error variables get re-used a lot. To prevent cross-contamination of error
    * messages, and still be able to provide a generic failure message when
    * mongoc_stream_tls_handshake fails without a specific reason, we need to init
    * the error code to 0.
    */
   if (error) {
      error->code = 0;
   }
   do {
      events = 0;

      if (mongoc_stream_tls_handshake (stream, host, timeout_msec, &events, error)) {
         return true;
      }

      if (events) {
         poller.stream = stream;
         poller.events = events;
         poller.revents = 0;

         if (expire) {
            now = bson_get_monotonic_time ();
            if ((expire - now) < 0) {
               bson_set_error (error,
                               MONGOC_ERROR_STREAM,
                               MONGOC_ERROR_STREAM_SOCKET,
                               "TLS handshake timedout.");
               return false;
            } else {
               timeout_msec = (expire - now) / 1000L;
            }
         }
         ret = mongoc_stream_poll (&poller, 1, timeout_msec);
      }
   } while (events && ret > 0);

   if (error && !error->code) {
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "TLS handshake failed.");
   }
   return false;
}
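A hedged usage sketch for the blocking handshake helper above; the caller, the already-created TLS stream, and the 5000ms timeout are hypothetical and only show how the return value and bson_error_t are consumed:

#include <mongoc.h>
#include <stdio.h>

static bool
do_handshake_example (mongoc_stream_t *tls_stream, const char *host)
{
   bson_error_t error;

   if (!mongoc_stream_tls_handshake_block (tls_stream, host, 5000, &error)) {
      fprintf (stderr, "TLS handshake with %s failed: %s\n", host, error.message);
      return false;
   }

   return true;
}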
void
_mongoc_write_command_update_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   mongoc_rpc_t rpc;
   uint32_t request_id = 0;
   bson_iter_t subiter, subsubiter;
   bson_t doc;
   bson_t update, selector;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
                 BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         BSON_ASSERT (bson_init_static (&doc, data, len));

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter)[0] != '$') &&
             !bson_validate (
                &doc, (bson_validate_flags_t) vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            bson_reader_destroy (reader);
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         bson_reader_destroy (reader);
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   bson_reader_destroy (reader);
   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      BSON_ASSERT (bson_iter_init (&subiter, bson));
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.update = data;
            BSON_ASSERT (bson_init_static (&update, data, len));
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.selector = data;
            BSON_ASSERT (bson_init_static (&selector, data, len));
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
         }
      }

      _mongoc_monitor_legacy_write (
         client, command, database, collection, server_stream, request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }
   bson_reader_destroy (reader);
}
static bool
_mongoc_socket_wait (mongoc_socket_t *sock, /* IN */
                     int events,            /* IN */
                     int64_t expire_at)     /* IN */
{
#ifdef _WIN32
   fd_set read_fds;
   fd_set write_fds;
   fd_set error_fds;
   struct timeval timeout_tv;
#else
   struct pollfd pfd;
#endif
   int ret;
   int timeout;
   int64_t now;

   ENTRY;

   BSON_ASSERT (sock);
   BSON_ASSERT (events);

#ifdef _WIN32
   FD_ZERO (&read_fds);
   FD_ZERO (&write_fds);
   FD_ZERO (&error_fds);

   if (events & POLLIN) {
      FD_SET (sock->sd, &read_fds);
   }

   if (events & POLLOUT) {
      FD_SET (sock->sd, &write_fds);
   }

   FD_SET (sock->sd, &error_fds);
#else
   pfd.fd = sock->sd;
   pfd.events = events | POLLERR | POLLHUP;
   pfd.revents = 0;
#endif
   now = bson_get_monotonic_time ();

   for (;;) {
      if (expire_at < 0) {
         timeout = -1;
      } else if (expire_at == 0) {
         timeout = 0;
      } else {
         timeout = (int) ((expire_at - now) / 1000L);
         if (timeout < 0) {
            timeout = 0;
         }
      }

#ifdef _WIN32
      if (timeout == -1) {
         /* not WSAPoll: daniel.haxx.se/blog/2012/10/10/wsapoll-is-broken */
         ret = select (0 /*unused*/, &read_fds, &write_fds, &error_fds, NULL);
      } else {
         timeout_tv.tv_sec = timeout / 1000;
         timeout_tv.tv_usec = (timeout % 1000) * 1000;
         ret = select (
            0 /*unused*/, &read_fds, &write_fds, &error_fds, &timeout_tv);
      }
      if (ret == SOCKET_ERROR) {
         _mongoc_socket_capture_errno (sock);
         ret = -1;
      } else if (FD_ISSET (sock->sd, &error_fds)) {
         errno = WSAECONNRESET;
         ret = -1;
      }
#else
      ret = poll (&pfd, 1, timeout);
#endif

      if (ret > 0) {
/* Something happened, so return that */
#ifdef _WIN32
         return (FD_ISSET (sock->sd, &read_fds) ||
                 FD_ISSET (sock->sd, &write_fds));
#else
         RETURN (0 != (pfd.revents & events));
#endif
      } else if (ret < 0) {
         /* poll itself failed */

         TRACE ("errno is: %d", errno);
         if (MONGOC_ERRNO_IS_AGAIN (errno)) {
            now = bson_get_monotonic_time ();

            if (expire_at < now) {
               _mongoc_socket_capture_errno (sock);
               RETURN (false);
            } else {
               continue;
            }
         } else {
            /* poll failed for some non-transient reason */
            _mongoc_socket_capture_errno (sock);
            RETURN (false);
         }
      } else {
         /* ret == 0, poll timed out */
         if (timeout) {
            mongoc_counter_streams_timeout_inc ();
         }
#ifdef _WIN32
         sock->errno_ = timeout ? WSAETIMEDOUT : EAGAIN;
#else
         sock->errno_ = timeout ? ETIMEDOUT : EAGAIN;
#endif
         RETURN (false);
      }
   }
}
/* This function is copied from _mongoc_stream_tls_openssl_readv */
static ssize_t
_mongoc_stream_tls_secure_transport_readv (mongoc_stream_t *stream,
                                           mongoc_iovec_t *iov,
                                           size_t iovcnt,
                                           size_t min_bytes,
                                           int32_t timeout_msec)
{
   mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream;
   mongoc_stream_tls_secure_transport_t *secure_transport =
      (mongoc_stream_tls_secure_transport_t *) tls->ctx;
   ssize_t ret = 0;
   size_t i;
   size_t read_ret;
   size_t iov_pos = 0;
   int64_t now;
   int64_t expire = 0;

   BSON_ASSERT (iov);
   BSON_ASSERT (iovcnt);
   BSON_ASSERT (secure_transport);
   ENTRY;

   tls->timeout_msec = timeout_msec;

   if (timeout_msec >= 0) {
      expire = bson_get_monotonic_time () + (timeout_msec * 1000UL);
   }

   for (i = 0; i < iovcnt; i++) {
      iov_pos = 0;

      while (iov_pos < iov[i].iov_len) {
         OSStatus status = SSLRead (secure_transport->ssl_ctx_ref,
                                    (char *) iov[i].iov_base + iov_pos,
                                    (int) (iov[i].iov_len - iov_pos),
                                    &read_ret);

         if (status != noErr) {
            RETURN (-1);
         }

         if (expire) {
            now = bson_get_monotonic_time ();

            if ((expire - now) < 0) {
               if (read_ret == 0) {
                  mongoc_counter_streams_timeout_inc ();
                  errno = ETIMEDOUT;
                  RETURN (-1);
               }

               tls->timeout_msec = 0;
            } else {
               tls->timeout_msec = (expire - now) / 1000L;
            }
         }

         ret += read_ret;

         if ((size_t) ret >= min_bytes) {
            mongoc_counter_streams_ingress_add (ret);
            RETURN (ret);
         }

         iov_pos += read_ret;
      }
   }

   if (ret >= 0) {
      mongoc_counter_streams_ingress_add (ret);
   }

   RETURN (ret);
}
void
_mongoc_write_command_delete_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   const uint8_t *data;
   mongoc_rpc_t rpc;
   uint32_t request_id;
   bson_iter_t q_iter;
   uint32_t len;
   int64_t limit = 0;
   bson_t *gle = NULL;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   bool r;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_DELETE_FAILED,
                      "Cannot do an empty delete.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      /* the document is like { "q": { <selector> }, limit: <0 or 1> } */
      r = (bson_iter_init (&q_iter, bson) && bson_iter_find (&q_iter, "q") &&
           BSON_ITER_HOLDS_DOCUMENT (&q_iter));

      BSON_ASSERT (r);
      bson_iter_document (&q_iter, &len, &data);
      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);
      if (len > max_bson_obj_size) {
         _mongoc_write_command_too_large_error (
            error, 0, len, max_bson_obj_size, NULL);
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_DELETE;
      rpc.delete_.zero = 0;
      rpc.delete_.collection = ns;

      if (bson_iter_find (&q_iter, "limit") &&
          (BSON_ITER_HOLDS_INT (&q_iter))) {
         limit = bson_iter_as_int64 (&q_iter);
      }

      rpc.delete_.flags =
         limit ? MONGOC_DELETE_SINGLE_REMOVE : MONGOC_DELETE_NONE;
      rpc.delete_.selector = data;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_DELETE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      started = bson_get_monotonic_time ();
   }
   bson_reader_destroy (reader);

   EXIT;
}
static ssize_t
_mongoc_stream_tls_readv (mongoc_stream_t *stream,
                          mongoc_iovec_t  *iov,
                          size_t           iovcnt,
                          size_t           min_bytes,
                          int32_t          timeout_msec)
{
   mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *)stream;
   ssize_t ret = 0;
   size_t i;
   int read_ret;
   size_t iov_pos = 0;
   int64_t now;
   int64_t expire = 0;

   BSON_ASSERT (tls);
   BSON_ASSERT (iov);
   BSON_ASSERT (iovcnt);

   tls->timeout_msec = timeout_msec;

   if (timeout_msec >= 0) {
      expire = bson_get_monotonic_time () + (timeout_msec * 1000UL);
   }

   for (i = 0; i < iovcnt; i++) {
      iov_pos = 0;

      while (iov_pos < iov[i].iov_len) {
         read_ret = BIO_read (tls->bio, (char *)iov[i].iov_base + iov_pos,
                              (int)(iov[i].iov_len - iov_pos));

         if (read_ret < 0) {
            return read_ret;
         }

         if (expire) {
            now = bson_get_monotonic_time ();

            if ((expire - now) < 0) {
               if (read_ret == 0) {
                  mongoc_counter_streams_timeout_inc();
#ifdef _WIN32
                  errno = WSAETIMEDOUT;
#else
                  errno = ETIMEDOUT;
#endif
                  return -1;
               }

               tls->timeout_msec = 0;
            } else {
               tls->timeout_msec = (expire - now) / 1000L;
            }
         }

         ret += read_ret;

         if ((size_t)ret >= min_bytes) {
            mongoc_counter_streams_ingress_add(ret);
            return ret;
         }

         iov_pos += read_ret;
      }
   }

   if (ret >= 0) {
      mongoc_counter_streams_ingress_add(ret);
   }

   return ret;
}
void
_mongoc_write_command_update_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   mongoc_rpc_t rpc;
   uint32_t request_id = 0;
   bson_iter_t subiter, subsubiter;
   bson_t doc;
   bool has_update, has_selector, is_upsert;
   bson_t update, selector;
   bson_t *gle = NULL;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   int32_t affected = 0;
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
                 BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         bson_init_static (&doc, data, len);

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter)[0] != '$') &&
             !bson_validate (
                &doc, (bson_validate_flags_t) vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            bson_reader_destroy (reader);
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         bson_reader_destroy (reader);
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   bson_reader_destroy (reader);
   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      has_update = false;
      has_selector = false;
      is_upsert = false;

      bson_iter_init (&subiter, bson);
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size, NULL);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.update = data;
            bson_init_static (&update, data, len);
            has_update = true;
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size, NULL);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.selector = data;
            bson_init_static (&selector, data, len);
            has_selector = true;
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
            is_upsert = true;
         }
      }

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         if (bson_iter_init_find (&subiter, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&subiter)) {
            affected = bson_iter_int32 (&subiter);
         }

         /*
          * CDRIVER-372:
          *
          * Versions of MongoDB before 2.6 don't return the _id for an
          * upsert if _id is not an ObjectId.
          */
         if (is_upsert && affected &&
             !bson_iter_init_find (&subiter, gle, "upserted") &&
             bson_iter_init_find (&subiter, gle, "updatedExisting") &&
             BSON_ITER_HOLDS_BOOL (&subiter) && !bson_iter_bool (&subiter)) {
            if (has_update && bson_iter_init_find (&subiter, &update, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            } else if (has_selector &&
                       bson_iter_init_find (&subiter, &selector, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            }
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_UPDATE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      started = bson_get_monotonic_time ();
   }
   bson_reader_destroy (reader);
}
void
_mongoc_write_command_insert_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   uint32_t current_offset;
   mongoc_iovec_t *iov;
   mongoc_rpc_t rpc;
   bson_t *gle = NULL;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   uint32_t n_docs_in_batch;
   uint32_t request_id = 0;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;
   int data_offset = 0;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   started = bson_get_monotonic_time ();
   current_offset = offset;

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 +
                      strlen (collection) + 1);

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);
   while ((bson = bson_reader_read (reader, &eof))) {
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx <= command->n_documents);

      if (bson->len > max_bson_obj_size) {
         /* document is too large */
         bson_t write_err_doc = BSON_INITIALIZER;

         _mongoc_write_command_too_large_error (
            error, idx, bson->len, max_bson_obj_size, &write_err_doc);

         _mongoc_write_result_merge_legacy (
            result,
            command,
            &write_err_doc,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_INSERT_FAILED,
            offset + idx);

         bson_destroy (&write_err_doc);
         data_offset += bson->len;

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - bson->len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
         iov[n_docs_in_batch].iov_len = bson->len;
         size += bson->len;
         n_docs_in_batch++;
         data_offset += bson->len;
      }

      idx++;
   }
   bson_reader_destroy (reader);

   if (n_docs_in_batch) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags =
         ((command->flags.ordered) ? MONGOC_INSERT_NONE
                                   : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         bool err = false;
         bson_iter_t citer;

         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            GOTO (cleanup);
         }

         err = (bson_iter_init_find (&citer, gle, "err") &&
                bson_iter_as_bool (&citer));

         /*
          * Overwrite the "n" field since it will be zero. Otherwise, our
          * merge_legacy code will not know how many we tried in this batch.
          */
         if (!err && bson_iter_init_find (&citer, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&citer) && !bson_iter_int32 (&citer)) {
            bson_iter_overwrite_int32 (&citer, n_docs_in_batch);
         }
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }

cleanup:

   if (gle) {
      _mongoc_write_result_merge_legacy (result,
                                         command,
                                         gle,
                                         client->error_api_version,
                                         MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                                         current_offset);

      current_offset = offset + idx;
      bson_destroy (gle);
      gle = NULL;
   }

   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
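The loop above grows a batch only while the next document still fits: it appends to the iovec array until size > (max_msg_size - bson->len), then sends what it has and jumps back to "again:" for the remainder. Below is a minimal, self-contained sketch of that splitting rule in plain C; it ignores the separate per-document max_bson_obj_size check handled above, and the document lengths, the deliberately tiny MAX_MSG_SIZE, and the flush_batch() helper are invented for illustration rather than driver APIs.

#include <stdio.h>

#define MAX_MSG_SIZE 48 /* deliberately tiny so the example splits batches */

static void
flush_batch (const size_t *doc_lens, size_t n_docs, size_t bytes)
{
   /* in the driver, this is where the accumulated iovec batch is sent as one RPC */
   printf ("sending batch: %zu documents, %zu bytes (first doc %zu bytes)\n",
           n_docs, bytes, doc_lens[0]);
}

int
main (void)
{
   /* pretend these are the encoded lengths of six BSON documents */
   const size_t doc_lens[] = { 10, 20, 15, 30, 5, 25 };
   const size_t n = sizeof doc_lens / sizeof doc_lens[0];
   size_t batch[6];
   size_t n_in_batch = 0;
   size_t size = 0;
   size_t i;

   for (i = 0; i < n; i++) {
      if (n_in_batch && size + doc_lens[i] > MAX_MSG_SIZE) {
         /* batch is full: send it, then start the next one */
         flush_batch (batch, n_in_batch, size);
         n_in_batch = 0;
         size = 0;
      }
      batch[n_in_batch++] = doc_lens[i];
      size += doc_lens[i];
   }

   if (n_in_batch) {
      flush_batch (batch, n_in_batch, size);
   }

   return 0;
}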
static mongoc_stream_t *
mongoc_client_connect_tcp (const mongoc_uri_t       *uri,
                           const mongoc_host_list_t *host,
                           bson_error_t             *error)
{
   mongoc_socket_t *sock = NULL;
   struct addrinfo hints;
   struct addrinfo *result, *rp;
   int32_t connecttimeoutms = MONGOC_DEFAULT_CONNECTTIMEOUTMS;
   int64_t expire_at;
   const bson_t *options;
   bson_iter_t iter;
   char portstr [8];
   int s;

   ENTRY;

   bson_return_val_if_fail (uri, NULL);
   bson_return_val_if_fail (host, NULL);

   if ((options = mongoc_uri_get_options (uri)) &&
       bson_iter_init_find (&iter, options, "connecttimeoutms") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      if (!(connecttimeoutms = bson_iter_int32(&iter))) {
         connecttimeoutms = MONGOC_DEFAULT_CONNECTTIMEOUTMS;
      }
   }

   BSON_ASSERT (connecttimeoutms);
   expire_at = bson_get_monotonic_time () + (connecttimeoutms * 1000L);

   bson_snprintf (portstr, sizeof portstr, "%hu", host->port);

   memset (&hints, 0, sizeof hints);
   hints.ai_family = host->family;
   hints.ai_socktype = SOCK_STREAM;
   hints.ai_flags = 0;
   hints.ai_protocol = 0;

   s = getaddrinfo (host->host, portstr, &hints, &result);

   if (s != 0) {
      mongoc_counter_dns_failure_inc ();
      bson_set_error(error,
                     MONGOC_ERROR_STREAM,
                     MONGOC_ERROR_STREAM_NAME_RESOLUTION,
                     "Failed to resolve %s",
                     host->host);
      RETURN (NULL);
   }

   mongoc_counter_dns_success_inc ();

   for (rp = result; rp; rp = rp->ai_next) {
      /*
       * Create a new non-blocking socket.
       */
      if (!(sock = mongoc_socket_new (rp->ai_family,
                                      rp->ai_socktype,
                                      rp->ai_protocol))) {
         continue;
      }

      /*
       * Try to connect to the peer.
       */
      if (0 != mongoc_socket_connect (sock,
                                      rp->ai_addr,
                                      (socklen_t)rp->ai_addrlen,
                                      expire_at)) {
         char errmsg_buf[32];
         const char * errmsg;

         errmsg = bson_strerror_r (mongoc_socket_errno (sock), errmsg_buf, sizeof errmsg_buf);
         MONGOC_WARNING ("Failed to connect, error: %d, %s\n",
                         mongoc_socket_errno(sock),
                         errmsg);
         mongoc_socket_destroy (sock);
         sock = NULL;
         continue;
      }

      break;
   }

   if (!sock) {
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_CONNECT,
                      "Failed to connect to target host: %s",
                      host->host_and_port);
      freeaddrinfo (result);
      RETURN (NULL);
   }

   freeaddrinfo (result);

   return mongoc_stream_socket_new (sock);
}
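One detail worth noting above: connecttimeoutms is converted once into an absolute expire_at on the monotonic clock, so every address returned by getaddrinfo() competes for the same remaining budget rather than each attempt getting a fresh timeout. Here is a minimal sketch of that deadline arithmetic, assuming POSIX clock_gettime(CLOCK_MONOTONIC) as a stand-in for bson_get_monotonic_time(); monotonic_usec() and connect_timeout_ms are names invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* microseconds from the monotonic clock, standing in for bson_get_monotonic_time() */
static int64_t
monotonic_usec (void)
{
   struct timespec ts;
   clock_gettime (CLOCK_MONOTONIC, &ts);
   return (int64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int
main (void)
{
   int32_t connect_timeout_ms = 10000; /* e.g. connecttimeoutms from the URI */
   int64_t expire_at = monotonic_usec () + (int64_t) connect_timeout_ms * 1000;
   int64_t remaining;

   /* each connection attempt is handed whatever time remains on the shared deadline */
   remaining = expire_at - monotonic_usec ();
   if (remaining < 0) {
      remaining = 0;
   }
   printf ("%lld microseconds left before the connect deadline\n",
           (long long) remaining);
   return 0;
}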
Exemple #29
0
static ssize_t
mongoc_stream_unix_readv (mongoc_stream_t *stream,
                          struct iovec    *iov,
                          size_t           iovcnt,
                          bson_uint32_t    timeout_msec)
{
   mongoc_stream_unix_t *file = (mongoc_stream_unix_t *)stream;
   struct msghdr msg;
   struct pollfd fds;
   bson_int64_t now;
   bson_int64_t expire;
   size_t cur = 0;
   ssize_t written;
   ssize_t ret = 0;
   int flags = 0;
   int timeout;

   bson_return_val_if_fail(stream, -1);
   bson_return_val_if_fail(iov, -1);
   bson_return_val_if_fail(iovcnt, -1);
   bson_return_val_if_fail(timeout_msec <= INT_MAX, -1);

   /*
    * NOTE: Thar' be dragons.
    *
    * This function is complex. It combines vectored read from a socket or
    * file descriptor with a timeout as needed by the wtimeout requirements of
    * the MongoDB driver.
    *
    * poll() is used for its portability in waiting for available I/O on a
    * non-blocking socket or file descriptor.
    *
    * To allow for efficient vectored I/O operations on the descriptor, we
    * use recvmsg() instead of recv(). However, recvmsg() does not support
    * regular file-descriptors and therefore we must fall back to readv()
    * if we detect such a case. This is fine since we don't actually use
    * regular file-descriptors (just socket descriptors) during production
    * use. Files are only used in test cases.
    *
    * We apply a default timeout if one has not been provided. The default
    * is one hour. If this is not sufficient, try TCP over bongo drums.
    */

   if (file->fd == -1) {
      errno = EBADF;
      return -1;
   }

   if (!timeout_msec) {
      timeout_msec = MONGOC_DEFAULT_TIMEOUT_MSEC;
   }

   /*
    * We require a monotonic clock for determining our timeout interval. This
    * is so that we are resilient to changes in the underlying wall clock,
    * such as during timezone changes. The monotonic clock is in microseconds
    * since an unknown epoch (but often system startup).
    */
   expire = bson_get_monotonic_time() + (timeout_msec * 1000UL);

   /*
    * Prepare our pollfd. If POLLRDHUP is supported, we can be notified
    * when the peer hangs up.
    */
   fds.fd = file->fd;
   fds.events = (POLLIN | POLLERR);
#ifdef POLLRDHUP
   fds.events |= POLLRDHUP;
#endif

   for (;;) {
      /*
       * Build our message for recvmsg() taking into account that we may have
       * already done a short-read and must increment the iovec.
       */
      msg.msg_name = NULL;
      msg.msg_namelen = 0;
      msg.msg_iov = iov + cur;
      msg.msg_iovlen = iovcnt - cur;
      msg.msg_control = NULL;
      msg.msg_controllen = 0;
      msg.msg_flags = 0;

      BSON_ASSERT(msg.msg_iov->iov_len);
      BSON_ASSERT(cur < iovcnt);

      fds.revents = 0;

      /*
       * Determine the number of milliseconds remaining before the timeout
       * expires. Use signed arithmetic so a deadline that has already
       * passed clamps to zero instead of wrapping to a huge unsigned value.
       */
      now = bson_get_monotonic_time();
      timeout = MAX(0, (expire - now) / 1000L);

      /*
       * Block on poll() until data is available or the timeout expires. Upon
       * timeout, synthesize an errno of ETIMEDOUT.
       */
      errno = 0;
      fds.revents = 0;
      written = poll(&fds, 1, timeout);
      if (written == -1) {
         return -1;
      } else if (written == 0) {
         errno = ETIMEDOUT;
         return -1;
      }

      /*
       * Perform recvmsg() on socket to receive available data. If it turns
       * out this is not a socket, fall back to readv(). This should only
       * happen during unit tests.
       */
      errno = 0;
      written = TEMP_FAILURE_RETRY(recvmsg(file->fd, &msg, flags));
      if (written == -1 && errno == ENOTSOCK) {
         written = TEMP_FAILURE_RETRY(readv(file->fd, iov + cur, iovcnt - cur));
         if (!written) {
            return ret;
         }
      }

      /*
       * If our recvmsg() failed, we can't do much now can we.
       */
      if (written == -1) {
         return written;
      } else {
         ret += written;
      }

      /*
       * Advance the iovecs in case we got a short read. Break out once
       * we have read all of our expected data.
       */
      while ((cur < iovcnt) && (written >= iov[cur].iov_len)) {
         BSON_ASSERT(iov[cur].iov_len);
         written -= iov[cur++].iov_len;
         BSON_ASSERT(cur <= iovcnt);
      }
      if (cur == iovcnt) {
         break;
      }
      BSON_ASSERT(cur < iovcnt);
      iov[cur].iov_base = ((bson_uint8_t *)iov[cur].iov_base) + written;
      iov[cur].iov_len -= written;

      BSON_ASSERT(iovcnt - cur);
      BSON_ASSERT(iov[cur].iov_len);
   }

   return ret;
}
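The heart of the timeout handling above is the per-iteration conversion of an absolute microsecond deadline into a millisecond argument for poll(), with ETIMEDOUT synthesized when the budget is exhausted. The sketch below isolates just that conversion; wait_readable_until() and monotonic_usec() are hypothetical helpers rather than driver APIs, and it assumes a POSIX system with poll() and CLOCK_MONOTONIC.

#include <errno.h>
#include <poll.h>
#include <stdint.h>
#include <time.h>

/* microseconds from the monotonic clock, standing in for bson_get_monotonic_time() */
static int64_t
monotonic_usec (void)
{
   struct timespec ts;
   clock_gettime (CLOCK_MONOTONIC, &ts);
   return (int64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* wait until fd is readable or the absolute deadline (in microseconds) passes;
 * returns 0 when readable, -1 with errno == ETIMEDOUT on expiry */
static int
wait_readable_until (int fd, int64_t expire_usec)
{
   struct pollfd pfd;
   int64_t remaining;
   int timeout_ms;
   int r;

   pfd.fd = fd;
   pfd.events = POLLIN | POLLERR;

   for (;;) {
      /* recompute the remaining budget on every pass, as the loop above does */
      remaining = expire_usec - monotonic_usec ();
      timeout_ms = remaining > 0 ? (int) (remaining / 1000) : 0;

      pfd.revents = 0;
      r = poll (&pfd, 1, timeout_ms);
      if (r > 0) {
         return 0; /* readable (or an error condition reported in revents) */
      }
      if (r == 0) {
         errno = ETIMEDOUT;
         return -1;
      }
      if (errno != EINTR) {
         return -1; /* genuine poll() failure */
      }
      /* interrupted by a signal: loop and retry with the reduced budget */
   }
}

int
main (void)
{
   /* wait up to two seconds for data on stdin */
   int64_t deadline = monotonic_usec () + 2 * 1000000;
   return wait_readable_until (0, deadline) == 0 ? 0 : 1;
}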