Code Example #1
File: bson_codec.cpp  Project: changnet/MServer
/* Decode a data packet.
 * return: < 0 on error, otherwise the number of parameters pushed onto the stack
 */
int32 bson_codec::decode(
    lua_State *L,const char *buffer,int32 len,const cmd_cfg_t *cfg )
{
    UNUSED( cfg );

    bson_reader_t *reader = 
        bson_reader_new_from_data( (const uint8_t *)buffer,len );

    const bson_t *doc = bson_reader_read( reader,NULL );
    if ( !doc )
    {
        ERROR( "invalid bson buffer" );
        bson_reader_destroy( reader );
        return -1;
    }

    struct error_collector ec;
    ec.what[0] = 0;
    int32 args = lbs_do_decode_stack( L,doc,&ec );

    bson_reader_destroy( reader );
    if ( args < 0 )
    {
        ERROR( "bson decode:%s",ec.what );
        return -1;
    }

    return args;
}
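Example #1 reduces to the core bson_reader pattern: wrap a byte buffer in a reader, pull documents out of it, and destroy the reader when done. The sketch below is a minimal standalone illustration of that pattern (it is not taken from any of the projects listed here). Note that each bson_t returned by bson_reader_read() is owned by the reader and stays valid only until the next read or until the reader is destroyed, and that the eof flag distinguishes a clean end of input from a truncated or corrupt buffer.

#include <bson.h>
#include <stdio.h>

/* Minimal sketch: read every document in a contiguous BSON buffer.
 * Returns the number of documents read, or -1 if the buffer was corrupt. */
static int
read_all_documents (const uint8_t *buf, size_t len)
{
   bson_reader_t *reader;
   const bson_t *doc;
   bool eof = false;
   int count = 0;
   char *json;

   reader = bson_reader_new_from_data (buf, len);

   while ((doc = bson_reader_read (reader, &eof))) {
      /* doc is owned by the reader; copy it (e.g. with bson_copy) if it must
       * outlive the next call to bson_reader_read or bson_reader_destroy. */
      json = bson_as_json (doc, NULL);
      printf ("%s\n", json);
      bson_free (json);
      count++;
   }

   bson_reader_destroy (reader);

   /* eof is only set when the reader ran off the end cleanly; a NULL return
    * with eof == false means the buffer was truncated or corrupt. */
   return eof ? count : -1;
}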
Code Example #2
File: cbson.c  Project: HeliumProject/libbson
static PyObject *
cbson_loads (PyObject *self,
             PyObject *args)
{
   const bson_uint8_t *buffer;
   bson_uint32_t buffer_length;
   bson_reader_t reader;
   bson_iter_t iter;
   const bson_t *b;
   bson_bool_t eof = FALSE;
   PyObject *ret = NULL;
   PyObject *dict;

   if (!PyArg_ParseTuple(args, "s#", &buffer, &buffer_length)) {
      return NULL;
   }

   ret = PyList_New(0);

   bson_reader_init_from_data(&reader, buffer, buffer_length);

   if (!(b = bson_reader_read(&reader, &eof))) {
      PyErr_SetString(PyExc_ValueError, "Failed to parse buffer.");
      goto failure;
   }

   do {
      if (!bson_iter_init(&iter, b)) {
         bson_reader_destroy(&reader);
         goto failure;
      }
      dict = PyDict_New();
      bson_iter_visit_all(&iter, &gLoadsVisitors, &dict);
      if (dict) {
         PyList_Append(ret, dict);
         Py_DECREF(dict);
      }
   } while ((b = bson_reader_read(&reader, &eof)));

   bson_reader_destroy(&reader);

   if (!eof) {
      PyErr_SetString(PyExc_ValueError, "Buffer contained invalid BSON.");
      goto failure;
   }

   return ret;

failure:
   Py_XDECREF(ret);
   return NULL;
}
Code Example #3
static void
test_reader_from_fd (void)
{
   bson_reader_t *reader;
   const bson_t *b;
   bson_uint32_t i;
   bson_iter_t iter;
   bson_bool_t eof;
   int fd;

   fd  = open("tests/binary/stream.bson", O_RDONLY);
   assert(fd >= 0);

   reader = bson_reader_new_from_fd(fd, TRUE);

   for (i = 0; i < 1000; i++) {
      eof = FALSE;
      b = bson_reader_read(reader, &eof);
      assert(b);
      assert(bson_iter_init(&iter, b));
      assert(!bson_iter_next(&iter));
   }

   assert_cmpint(eof, ==, FALSE);
   b = bson_reader_read(reader, &eof);
   assert(!b);
   assert_cmpint(eof, ==, TRUE);
   bson_reader_destroy(reader);
}
Code Example #4
File: mongoc-write-command.c  Project: cran/mongolite
void
_append_array_from_command (mongoc_write_command_t *command, bson_t *bson)
{
   bson_t ar;
   bson_reader_t *reader;
   char str[16];
   uint32_t i = 0;
   const char *key;
   bool eof;
   const bson_t *current;


   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);

   bson_append_array_begin (bson,
                            gCommandFields[command->type],
                            gCommandFieldLens[command->type],
                            &ar);

   while ((current = bson_reader_read (reader, &eof))) {
      bson_uint32_to_string (i, &key, str, sizeof str);
      BSON_APPEND_DOCUMENT (&ar, key, current);
      i++;
   }

   bson_append_array_end (bson, &ar);
   bson_reader_destroy (reader);
}
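A BSON array is encoded as a document whose keys are the decimal index strings "0", "1", "2", and so on; bson_uint32_to_string() is the helper that produces those keys without allocating, writing the digits into the caller's buffer and pointing the key pointer at the result. The sketch below shows the same pattern outside the driver internals; the function and field names (append_docs_array, "docs") are illustrative, not from the project above.

#include <bson.h>

/* Sketch: copy every document from a BSON payload buffer into an array
 * field named "docs" on a target document. */
static void
append_docs_array (bson_t *target, const uint8_t *payload, size_t payload_len)
{
   bson_reader_t *reader;
   const bson_t *current;
   bson_t array;
   char buf[16];
   const char *key;
   uint32_t i = 0;

   reader = bson_reader_new_from_data (payload, payload_len);
   bson_append_array_begin (target, "docs", -1, &array);

   while ((current = bson_reader_read (reader, NULL))) {
      /* Generates "0", "1", ... and points key at the result. */
      bson_uint32_to_string (i++, &key, buf, sizeof buf);
      BSON_APPEND_DOCUMENT (&array, key, current);
   }

   bson_append_array_end (target, &array);
   bson_reader_destroy (reader);
}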
Code Example #5
File: test-reader.c  Project: 3rduncle/libbson
static void
test_reader_from_handle (void)
{
   bson_reader_t *reader;
   const bson_t *b;
   bson_iter_t iter;
   uint32_t i;
   bool eof;
   int fd;

   fd  = bson_open(BINARY_DIR"/stream.bson", O_RDONLY);
   assert(-1 != fd);

   reader = bson_reader_new_from_handle ((void *)&fd,
                                         &test_reader_from_handle_read,
                                         &test_reader_from_handle_destroy);

   for (i = 0; i < 1000; i++) {
      eof = false;
      b = bson_reader_read(reader, &eof);
      assert(b);
      assert(bson_iter_init(&iter, b));
      assert(!bson_iter_next(&iter));
   }

   assert_cmpint(eof, ==, false);
   b = bson_reader_read(reader, &eof);
   assert(!b);
   assert_cmpint(eof, ==, true);
   bson_reader_destroy(reader);
}
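This example and the one that follows rely on caller-supplied callbacks (test_reader_from_handle_read, _read_cb, and so on) whose bodies are not shown here. bson_reader_new_from_handle() expects a read callback with a read(2)-style signature and an optional destroy callback. The sketch below shows one plausible implementation over a plain file descriptor; the helper names are hypothetical, not the ones used in the test suite.

#include <bson.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical read callback: pull up to `count` bytes from the fd stored
 * behind `handle`. Returning 0 signals end-of-stream, < 0 signals an error. */
static ssize_t
fd_reader_read (void *handle, void *buf, size_t count)
{
   int fd = *(int *) handle;
   return read (fd, buf, count);
}

/* Hypothetical destroy callback: close the fd when the reader is destroyed. */
static void
fd_reader_destroy (void *handle)
{
   close (*(int *) handle);
}

/* Usage sketch: assumes fd is an open descriptor positioned at a BSON stream. */
static void
dump_stream (int fd)
{
   bson_reader_t *reader;
   const bson_t *doc;
   char *json;

   reader = bson_reader_new_from_handle ((void *) &fd,
                                         fd_reader_read,
                                         fd_reader_destroy);

   while ((doc = bson_reader_read (reader, NULL))) {
      json = bson_as_json (doc, NULL);
      printf ("%s\n", json);
      bson_free (json);
   }

   bson_reader_destroy (reader);
}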
Code Example #6
int
main (int argc,
      char *argv[])
{
   mongoc_matcher_t *matcher;
   bson_reader_t *reader;
   const bson_t *bson;
   bson_t *spec;
   char *str;

   reader = bson_reader_new_from_handle ((void *)&MONGOC_STDIN_FILENO, &_read_cb, &_destroy_cb);
   spec = BCON_NEW ("hello", "world");
   matcher = mongoc_matcher_new (spec, NULL);

   while ((bson = bson_reader_read (reader, NULL))) {
      if (mongoc_matcher_match (matcher, bson)) {
         str = bson_as_json (bson, NULL);
         printf ("%s\n", str);
         bson_free (str);
      }
   }

   bson_reader_destroy (reader);
   bson_destroy (spec);

   return 0;
}
Code Example #7
File: cbson.c  Project: HeliumProject/libbson
static PyObject *
cbson_as_json (PyObject *self,
               PyObject *args)
{
   const bson_uint8_t *buffer;
   bson_uint32_t buffer_length;
   bson_reader_t reader;
   const bson_t *b;
   PyObject *ret = NULL;
   size_t len = 0;
   char *str;

   if (!PyArg_ParseTuple(args, "s#", &buffer, &buffer_length)) {
      return NULL;
   }

   bson_reader_init_from_data(&reader, buffer, buffer_length);
   b = bson_reader_read(&reader, NULL);
   bson_reader_destroy(&reader);

   if (b) {
      str = bson_as_json(b, &len);
      ret = PyUnicode_DecodeUTF8(str, len, "strict");
      bson_free(str);
   } else {
      PyErr_SetString(PyExc_ValueError, "Failed to parse BSON document.");
   }

   return ret;
}
Code Example #8
File: reader.c  Project: cran/mongolite
SEXP R_mongo_restore(SEXP con, SEXP ptr_col, SEXP verb) {
  bool verbose = Rf_asLogical(verb);
  mongoc_collection_t *col = r2col(ptr_col);
  bson_reader_t *reader = bson_reader_new_from_handle(con, bson_reader_feed, bson_reader_finalize);
  mongoc_bulk_operation_t *bulk = NULL;

  const bson_t *b;
  bson_error_t err;
  int count = 0;
  int i = 0;
  bool done = false;
  bson_t reply;

  while(!done) {
    //note: default opts uses {ordered:true}
    bulk = mongoc_collection_create_bulk_operation_with_opts(col, NULL);
    for(i = 0; i < 1000; i++){
      if(!(b = bson_reader_read (reader, &done)))
        break;
      mongoc_bulk_operation_insert (bulk, b);
      count++;
    }

    if(i == 0)
      break;

    if(!mongoc_bulk_operation_execute (bulk, &reply, &err)){
      bson_reader_destroy(reader);
      mongoc_bulk_operation_destroy (bulk);
      Rf_error(err.message);
    }

    if(verbose)
      Rprintf("\rRestored %d records...", count);
  }

  if(verbose)
    Rprintf("\rDone! Inserted total of %d records.\n", count);

  if (!done)
    Rf_warning("Failed to read all documents.\n");

  bson_reader_destroy(reader);
  mongoc_bulk_operation_destroy (bulk);
  return Rf_ScalarInteger(count);
}
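The restore routine above drains the reader in batches of at most 1000 documents so that each mongoc_bulk_operation_t stays bounded. Stripped of the R and bulk-write specifics, the batching loop reduces to something like the sketch below; insert_one and flush_batch are hypothetical placeholders for the per-document and per-batch work.

#include <bson.h>

#define BATCH_SIZE 1000

/* Sketch: feed documents to insert_one() and call flush_batch() after every
 * BATCH_SIZE documents (and once more for the final partial batch).
 * Returns true only if the stream ended cleanly on a document boundary. */
static bool
drain_in_batches (bson_reader_t *reader,
                  void (*insert_one) (const bson_t *doc),
                  void (*flush_batch) (void))
{
   const bson_t *doc;
   bool done = false;
   int n;

   while (!done) {
      for (n = 0; n < BATCH_SIZE; n++) {
         if (!(doc = bson_reader_read (reader, &done))) {
            break;
         }
         /* doc is only valid until the next read, so it must be consumed
          * (inserted or copied) before the loop advances. */
         insert_one (doc);
      }

      if (n == 0) {
         break;
      }

      flush_batch ();
   }

   return done;
}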
Code Example #9
File: test-reader.c  Project: 3rduncle/libbson
static void
test_reader_from_data_document_length_too_small (void)
{
   bson_reader_t *reader;
   uint8_t *buffer;
   bool eof = false;

   buffer = bson_malloc0(5);
   buffer[0] = 4;

   reader = bson_reader_new_from_data(buffer, 5);
   assert(!bson_reader_read(reader, &eof));
   assert_cmpint(eof, ==, false);

   bson_free(buffer);

   bson_reader_destroy(reader);
}
Code Example #10
Array
cbson_loads_from_string (const String& bson) 
{
  bson_reader_t * reader;
  const bson_t * obj;
  bool reached_eof;

  Array output = Array();

  reader = bson_reader_new_from_data((uint8_t *)bson.c_str(), bson.size());
  
  if (!(obj = bson_reader_read(reader, &reached_eof))) {
    mongoThrow<MongoException>("Unexpected end of BSON. Input document is likely corrupted!");
  }  

  output = cbson_loads(obj);
  bson_reader_destroy(reader);

  return output;
}
Code Example #11
File: test-reader.c  Project: 3rduncle/libbson
static void
test_reader_from_data_overflow (void)
{
   bson_reader_t *reader;
   uint8_t *buffer;
   const bson_t *b;
   uint32_t i;
   bool eof = false;

   buffer = bson_malloc0(4096);
   for (i = 0; i < 4095; i += 5) {
      buffer[i] = 5;
   }

   buffer[4095] = 5;

   reader = bson_reader_new_from_data(buffer, 4096);

   for (i = 0; (b = bson_reader_read(reader, &eof)); i++) {
      const uint8_t *buf = bson_get_data(b);
      assert(b->len == 5);
      assert(buf[0] == 5);
      assert(buf[1] == 0);
      assert(buf[2] == 0);
      assert(buf[3] == 0);
      assert(buf[4] == 0);
      eof = false;
   }

   assert(i == (4095/5));

   assert_cmpint(eof, ==, false);

   bson_free(buffer);

   bson_reader_destroy(reader);
}
Code Example #12
static void
test_reader_from_data_overflow (void)
{
   bson_reader_t reader;
   bson_uint8_t *buffer;
   const bson_t *b;
   bson_uint32_t i;
   bson_bool_t eof = FALSE;

   buffer = bson_malloc0(4096);
   for (i = 0; i < 4095; i += 5) {
      buffer[i] = 5;
   }

   buffer[4095] = 5;

   bson_reader_init_from_data(&reader, buffer, 4096);

   for (i = 0; (b = bson_reader_read(&reader, &eof)); i++) {
      const bson_uint8_t *buf = bson_get_data(b);
      assert(b->len == 5);
      assert(buf[0] == 5);
      assert(buf[1] == 0);
      assert(buf[2] == 0);
      assert(buf[3] == 0);
      assert(buf[4] == 0);
      eof = FALSE;
   }

   assert(i == (4095/5));

   assert_cmpint(eof, ==, FALSE);

   bson_free(buffer);

   bson_reader_destroy(&reader);
}
Code Example #13
void
_mongoc_cursor_destroy (mongoc_cursor_t *cursor)
{
   ENTRY;

   bson_return_if_fail(cursor);

   if (cursor->in_exhaust) {
      cursor->client->in_exhaust = FALSE;

      if (!cursor->done) {
         _mongoc_cluster_disconnect_node (
            &cursor->client->cluster,
            &cursor->client->cluster.nodes[cursor->hint - 1]);
      }
   } else if (cursor->rpc.reply.cursor_id) {
      _mongoc_cursor_kill_cursor(cursor, cursor->rpc.reply.cursor_id);
   }

   if (cursor->reader) {
      bson_reader_destroy(cursor->reader);
      cursor->reader = NULL;
   }

   bson_destroy(&cursor->query);
   bson_destroy(&cursor->fields);
   _mongoc_buffer_destroy(&cursor->buffer);
   mongoc_read_prefs_destroy(cursor->read_prefs);

   bson_free(cursor);

   mongoc_counter_cursors_active_dec();
   mongoc_counter_cursors_disposed_inc();

   EXIT;
}
Code Example #14
static void
test_reader_from_data (void)
{
   bson_reader_t *reader;
   bson_uint8_t *buffer;
   const bson_t *b;
   bson_uint32_t i;
   bson_bool_t eof = FALSE;

   buffer = bson_malloc0(4095);
   for (i = 0; i < 4095; i += 5) {
      buffer[i] = 5;
   }

   reader = bson_reader_new_from_data(buffer, 4095);

   for (i = 0; (b = bson_reader_read(reader, &eof)); i++) {
      const bson_uint8_t *buf = bson_get_data(b);

      /* do nothing */
      assert(b->len == 5);
      assert(buf[0] == 5);
      assert(buf[1] == 0);
      assert(buf[2] == 0);
      assert(buf[3] == 0);
      assert(buf[4] == 0);
   }

   assert(i == (4095/5));

   assert_cmpint(eof, ==, TRUE);

   bson_free(buffer);

   bson_reader_destroy(reader);
}
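The from_data reader tests above (#11, #12, and #14) all build their input the same way: the 5-byte sequence { 0x05, 0x00, 0x00, 0x00, 0x00 } is the smallest valid BSON document (a little-endian int32 length of 5 that includes itself, no elements, and a trailing 0x00 terminator), so a zeroed 4095-byte buffer with a 5 at every fifth offset is a stream of 819 empty documents. A minimal standalone version of that setup, written as a sketch under the same assumption about the encoding:

#include <bson.h>
#include <assert.h>
#include <string.h>

/* Sketch: build a stream of n empty documents and count them back. */
static void
roundtrip_empty_docs (uint32_t n)
{
   /* One empty document is 5 bytes: int32 length (= 5, little-endian),
    * zero elements, and a 0x00 terminator. */
   static const uint8_t empty_doc[5] = { 5, 0, 0, 0, 0 };
   uint8_t *buffer = bson_malloc0 ((size_t) n * sizeof empty_doc);
   bson_reader_t *reader;
   const bson_t *b;
   bool eof = false;
   uint32_t count = 0;
   uint32_t i;

   for (i = 0; i < n; i++) {
      memcpy (buffer + i * sizeof empty_doc, empty_doc, sizeof empty_doc);
   }

   reader = bson_reader_new_from_data (buffer, (size_t) n * sizeof empty_doc);
   while ((b = bson_reader_read (reader, &eof))) {
      assert (b->len == 5);
      count++;
   }
   assert (count == n);
   assert (eof);   /* the stream ends exactly on a document boundary */

   bson_reader_destroy (reader);
   bson_free (buffer);
}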
Code Example #15
static bson_bool_t
_mongoc_cursor_get_more (mongoc_cursor_t *cursor)
{
   bson_uint64_t cursor_id;
   bson_uint32_t request_id;
   mongoc_rpc_t rpc;

   ENTRY;

   BSON_ASSERT(cursor);

   if (! cursor->in_exhaust) {
      if (!_mongoc_client_warm_up (cursor->client, &cursor->error)) {
         cursor->failed = TRUE;
         RETURN (FALSE);
      }

      if (!(cursor_id = cursor->rpc.reply.cursor_id)) {
         bson_set_error(&cursor->error,
                        MONGOC_ERROR_CURSOR,
                        MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                        "No valid cursor was provided.");
         goto failure;
      }

      rpc.get_more.msg_len = 0;
      rpc.get_more.request_id = 0;
      rpc.get_more.response_to = 0;
      rpc.get_more.opcode = MONGOC_OPCODE_GET_MORE;
      rpc.get_more.zero = 0;
      rpc.get_more.collection = cursor->ns;
      if ((cursor->flags & MONGOC_QUERY_TAILABLE_CURSOR)) {
         rpc.get_more.n_return = 0;
      } else {
         rpc.get_more.n_return = _mongoc_n_return(cursor);
      }
      rpc.get_more.cursor_id = cursor_id;

      /*
       * TODO: Stamp protections for disconnections.
       */

      if (!_mongoc_client_sendv(cursor->client, &rpc, 1, cursor->hint,
                                NULL, cursor->read_prefs, &cursor->error)) {
         cursor->done = TRUE;
         cursor->failed = TRUE;
         RETURN(FALSE);
      }

      request_id = BSON_UINT32_FROM_LE(rpc.header.request_id);
   } else {
      request_id = BSON_UINT32_FROM_LE(cursor->rpc.header.request_id);
   }

   _mongoc_buffer_clear(&cursor->buffer, FALSE);

   if (!_mongoc_client_recv(cursor->client,
                            &cursor->rpc,
                            &cursor->buffer,
                            cursor->hint,
                            &cursor->error)) {
      goto failure;
   }

   if ((cursor->rpc.header.opcode != MONGOC_OPCODE_REPLY) ||
       (cursor->rpc.header.response_to != request_id)) {
      bson_set_error(&cursor->error,
                     MONGOC_ERROR_PROTOCOL,
                     MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                     "A reply to an invalid request id was received.");
      goto failure;
   }

   if (_mongoc_cursor_unwrap_failure(cursor)) {
      goto failure;
   }

   if (cursor->reader) {
      bson_reader_destroy(cursor->reader);
   }

   cursor->reader = bson_reader_new_from_data(cursor->rpc.reply.documents,
                                              cursor->rpc.reply.documents_len);

   cursor->end_of_event = FALSE;

   RETURN(TRUE);

failure:
   cursor->done = TRUE;
   cursor->failed = TRUE;

   RETURN(FALSE);
}
Code Example #16
static bson_bool_t
_mongoc_cursor_query (mongoc_cursor_t *cursor)
{
   bson_uint32_t hint;
   bson_uint32_t request_id;
   mongoc_rpc_t rpc;

   ENTRY;

   bson_return_val_if_fail(cursor, FALSE);

   if (!_mongoc_client_warm_up (cursor->client, &cursor->error)) {
      cursor->failed = TRUE;
      RETURN (FALSE);
   }

   rpc.query.msg_len = 0;
   rpc.query.request_id = 0;
   rpc.query.response_to = 0;
   rpc.query.opcode = MONGOC_OPCODE_QUERY;
   rpc.query.flags = cursor->flags;
   rpc.query.collection = cursor->ns;
   rpc.query.skip = cursor->skip;
   if ((cursor->flags & MONGOC_QUERY_TAILABLE_CURSOR)) {
      rpc.query.n_return = 0;
   } else {
      rpc.query.n_return = _mongoc_n_return(cursor);
   }
   rpc.query.query = bson_get_data(&cursor->query);
   rpc.query.fields = bson_get_data(&cursor->fields);

   if (!(hint = _mongoc_client_sendv (cursor->client, &rpc, 1, 0,
                                      NULL, cursor->read_prefs,
                                      &cursor->error))) {
      goto failure;
   }

   cursor->hint = hint;
   request_id = BSON_UINT32_FROM_LE(rpc.header.request_id);

   _mongoc_buffer_clear(&cursor->buffer, FALSE);

   if (!_mongoc_client_recv(cursor->client,
                            &cursor->rpc,
                            &cursor->buffer,
                            hint,
                            &cursor->error)) {
      goto failure;
   }

   if ((cursor->rpc.header.opcode != MONGOC_OPCODE_REPLY) ||
       (cursor->rpc.header.response_to != request_id)) {
      bson_set_error(&cursor->error,
                     MONGOC_ERROR_PROTOCOL,
                     MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                     "A reply to an invalid request id was received.");
      goto failure;
   }

   if (_mongoc_cursor_unwrap_failure(cursor)) {
      if ((cursor->error.domain == MONGOC_ERROR_QUERY) &&
          (cursor->error.code == MONGOC_ERROR_QUERY_NOT_TAILABLE)) {
         cursor->failed = TRUE;
      }
      goto failure;
   }

   if (cursor->reader) {
      bson_reader_destroy(cursor->reader);
   }

   cursor->reader = bson_reader_new_from_data(cursor->rpc.reply.documents,
                                              cursor->rpc.reply.documents_len);

   if (cursor->flags & MONGOC_QUERY_EXHAUST) {
      cursor->in_exhaust = TRUE;
      cursor->client->in_exhaust = TRUE;
   }

   cursor->done = FALSE;
   cursor->end_of_event = FALSE;
   cursor->sent = TRUE;
   RETURN(TRUE);

failure:
   cursor->failed = TRUE;
   cursor->done = TRUE;
   RETURN(FALSE);
}
Code Example #17
void
_mongoc_write_command_update_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   mongoc_rpc_t rpc;
   uint32_t request_id = 0;
   bson_iter_t subiter, subsubiter;
   bson_t doc;
   bson_t update, selector;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
                 BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         BSON_ASSERT (bson_init_static (&doc, data, len));

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter)[0] != '$') &&
             !bson_validate (
                &doc, (bson_validate_flags_t) vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            bson_reader_destroy (reader);
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         bson_reader_destroy (reader);
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   bson_reader_destroy (reader);
   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      BSON_ASSERT (bson_iter_init (&subiter, bson));
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.update = data;
            BSON_ASSERT (bson_init_static (&update, data, len));
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.selector = data;
            BSON_ASSERT (bson_init_static (&selector, data, len));
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
         }
      }

      _mongoc_monitor_legacy_write (
         client, command, database, collection, server_stream, request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }
   bson_reader_destroy (reader);
}
Code Example #18
void
_mongoc_write_command_delete_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   const uint8_t *data;
   mongoc_rpc_t rpc;
   uint32_t request_id;
   bson_iter_t q_iter;
   uint32_t len;
   int64_t limit = 0;
   bson_t *gle = NULL;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   bool r;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_DELETE_FAILED,
                      "Cannot do an empty delete.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      /* the document is like { "q": { <selector> }, limit: <0 or 1> } */
      r = (bson_iter_init (&q_iter, bson) && bson_iter_find (&q_iter, "q") &&
           BSON_ITER_HOLDS_DOCUMENT (&q_iter));

      BSON_ASSERT (r);
      bson_iter_document (&q_iter, &len, &data);
      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);
      if (len > max_bson_obj_size) {
         _mongoc_write_command_too_large_error (
            error, 0, len, max_bson_obj_size, NULL);
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_DELETE;
      rpc.delete_.zero = 0;
      rpc.delete_.collection = ns;

      if (bson_iter_find (&q_iter, "limit") &&
          (BSON_ITER_HOLDS_INT (&q_iter))) {
         limit = bson_iter_as_int64 (&q_iter);
      }

      rpc.delete_.flags =
         limit ? MONGOC_DELETE_SINGLE_REMOVE : MONGOC_DELETE_NONE;
      rpc.delete_.selector = data;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_DELETE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      started = bson_get_monotonic_time ();
   }
   bson_reader_destroy (reader);

   EXIT;
}
Code Example #19
/*
 * main --
 *
 *       Connects to a server and reads BSON from it. This program takes the
 *       following command line options:
 *
 *          -h              Print this help and exit.
 *          -s SERVER_NAME  Specify the host name of the server.
 *          -p PORT_NUM     Specify the port number to connect to on the server.
 */
int
main (int   argc,
      char *argv[])
{
   bson_reader_t  *reader;
   char           *hostname = NULL;
   char           *json;
   char           *port = NULL;
   const bson_t   *document;
   int             fd;
   int             opt;

   opterr = 1;

   /*
    * Parse command line arguments.
    */
   while ((opt = getopt (argc, argv, "hs:p:")) != -1) {
      switch (opt) {
      case 'h':
         fprintf (stdout,
                  "Usage: %s [-s SERVER_NAME] [-p PORT_NUM]\n",
                  argv[0]);
         return EXIT_SUCCESS;
      case 'p':
         free (port);
         port = (char *)malloc (strlen (optarg) + 1);
         strcpy (port, optarg);
         break;
      case 's':
         free (hostname);
         hostname = (char *)malloc (strlen (optarg) + 1);
         strcpy (hostname, optarg);
         break;
      default:
         fprintf (stderr, "Unknown option: %s\n", optarg);
      }
   }

   /*
    * Open a file descriptor on the remote and read in BSON documents, one by
    * one. As an example of processing, this prints the incoming documents as
    * JSON onto STDOUT.
    */
   fd = bson_streaming_remote_open (hostname, port);
   if (fd == -1) {
      return EXIT_FAILURE;
   }

   reader = bson_reader_new_from_fd (fd, true);
   while ((document = bson_reader_read (reader, NULL))) {
      json = bson_as_json (document, NULL);
      fprintf (stdout, "%s\n", json);
      bson_free (json);
   }

   /*
    * Destroy the reader, which performs cleanup. The ``true'' argument passed
    * to bson_reader_new_from_fd tells libbson to close the file descriptor on
    * destruction.
    */
   bson_reader_destroy (reader);
   free (hostname);
   free (port);
   return EXIT_SUCCESS;
}
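When the input is a local file rather than a socket, libbson can also open it for you: bson_reader_new_from_file() (used in example #24 below) wraps the open/close handling and reports failures through a bson_error_t. The following is a hedged sketch of the same JSON-dumping loop built on that constructor; the function name dump_file_as_json is illustrative.

#include <bson.h>
#include <stdio.h>

/* Sketch: print every document in a BSON dump file as JSON.
 * Returns false if the file could not be opened. */
static bool
dump_file_as_json (const char *path)
{
   bson_error_t error;
   bson_reader_t *reader;
   const bson_t *doc;
   char *json;

   if (!(reader = bson_reader_new_from_file (path, &error))) {
      fprintf (stderr, "Failed to open %s: %s\n", path, error.message);
      return false;
   }

   while ((doc = bson_reader_read (reader, NULL))) {
      json = bson_as_json (doc, NULL);
      printf ("%s\n", json);
      bson_free (json);
   }

   bson_reader_destroy (reader);
   return true;
}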
Code Example #20
void
_mongoc_write_command_insert_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   uint32_t current_offset;
   mongoc_iovec_t *iov;
   mongoc_rpc_t rpc;
   bson_t *gle = NULL;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   uint32_t n_docs_in_batch;
   uint32_t request_id = 0;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;
   int data_offset = 0;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   started = bson_get_monotonic_time ();
   current_offset = offset;

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 +
                      strlen (collection) + 1);

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);
   while ((bson = bson_reader_read (reader, &eof))) {
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx <= command->n_documents);

      if (bson->len > max_bson_obj_size) {
         /* document is too large */
         bson_t write_err_doc = BSON_INITIALIZER;

         _mongoc_write_command_too_large_error (
            error, idx, bson->len, max_bson_obj_size, &write_err_doc);

         _mongoc_write_result_merge_legacy (
            result,
            command,
            &write_err_doc,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_INSERT_FAILED,
            offset + idx);

         bson_destroy (&write_err_doc);
         data_offset += bson->len;

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - bson->len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
         iov[n_docs_in_batch].iov_len = bson->len;
         size += bson->len;
         n_docs_in_batch++;
         data_offset += bson->len;
      }

      idx++;
   }
   bson_reader_destroy (reader);

   if (n_docs_in_batch) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags =
         ((command->flags.ordered) ? MONGOC_INSERT_NONE
                                   : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         bool err = false;
         bson_iter_t citer;

         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            GOTO (cleanup);
         }

         err = (bson_iter_init_find (&citer, gle, "err") &&
                bson_iter_as_bool (&citer));

         /*
          * Overwrite the "n" field since it will be zero. Otherwise, our
          * merge_legacy code will not know how many we tried in this batch.
          */
         if (!err && bson_iter_init_find (&citer, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&citer) && !bson_iter_int32 (&citer)) {
            bson_iter_overwrite_int32 (&citer, n_docs_in_batch);
         }
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }

cleanup:

   if (gle) {
      _mongoc_write_result_merge_legacy (result,
                                         command,
                                         gle,
                                         client->error_api_version,
                                         MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                                         current_offset);

      current_offset = offset + idx;
      bson_destroy (gle);
      gle = NULL;
   }

   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
Code Example #21
void
_mongoc_write_command_update_legacy (
   mongoc_write_command_t *command,
   mongoc_client_t *client,
   mongoc_server_stream_t *server_stream,
   const char *database,
   const char *collection,
   const mongoc_write_concern_t *write_concern,
   uint32_t offset,
   mongoc_write_result_t *result,
   bson_error_t *error)
{
   int64_t started;
   int32_t max_bson_obj_size;
   mongoc_rpc_t rpc;
   uint32_t request_id = 0;
   bson_iter_t subiter, subsubiter;
   bson_t doc;
   bool has_update, has_selector, is_upsert;
   bson_t update, selector;
   bson_t *gle = NULL;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   int32_t affected = 0;
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
                 BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);

   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         bson_init_static (&doc, data, len);

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter)[0] != '$') &&
             !bson_validate (
                &doc, (bson_validate_flags_t) vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            bson_reader_destroy (reader);
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         bson_reader_destroy (reader);
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   bson_reader_destroy (reader);
   reader =
      bson_reader_new_from_data (command->payload.data, command->payload.len);
   while ((bson = bson_reader_read (reader, &eof))) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      has_update = false;
      has_selector = false;
      is_upsert = false;

      bson_iter_init (&subiter, bson);
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size, NULL);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.update = data;
            bson_init_static (&update, data, len);
            has_update = true;
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            if (len > max_bson_obj_size) {
               _mongoc_write_command_too_large_error (
                  error, 0, len, max_bson_obj_size, NULL);
               result->failed = true;
               bson_reader_destroy (reader);
               EXIT;
            }

            rpc.update.selector = data;
            bson_init_static (&selector, data, len);
            has_selector = true;
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t) (
                  rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
            is_upsert = true;
         }
      }

      _mongoc_monitor_legacy_write (client,
                                    command,
                                    database,
                                    collection,
                                    write_concern,
                                    server_stream,
                                    request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, write_concern, error)) {
         result->failed = true;
         bson_reader_destroy (reader);
         EXIT;
      }

      if (mongoc_write_concern_is_acknowledged (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            bson_reader_destroy (reader);
            EXIT;
         }

         if (bson_iter_init_find (&subiter, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&subiter)) {
            affected = bson_iter_int32 (&subiter);
         }

         /*
          * CDRIVER-372:
          *
          * Versions of MongoDB before 2.6 don't return the _id for an
          * upsert if _id is not an ObjectId.
          */
         if (is_upsert && affected &&
             !bson_iter_init_find (&subiter, gle, "upserted") &&
             bson_iter_init_find (&subiter, gle, "updatedExisting") &&
             BSON_ITER_HOLDS_BOOL (&subiter) && !bson_iter_bool (&subiter)) {
            if (has_update && bson_iter_init_find (&subiter, &update, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            } else if (has_selector &&
                       bson_iter_init_find (&subiter, &selector, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            }
         }

         _mongoc_write_result_merge_legacy (
            result,
            command,
            gle,
            client->error_api_version,
            MONGOC_ERROR_COLLECTION_UPDATE_FAILED,
            offset);

         offset++;
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              gle,
                                              server_stream,
                                              request_id);

      if (gle) {
         bson_destroy (gle);
         gle = NULL;
      }

      started = bson_get_monotonic_time ();
   }
   bson_reader_destroy (reader);
}
Code Example #22
void
_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   int64_t started;
   mongoc_iovec_t *iov;
   mongoc_rpc_t rpc;
   uint32_t size = 0;
   bool has_more;
   char ns[MONGOC_NAMESPACE_MAX + 1];
   uint32_t n_docs_in_batch;
   uint32_t request_id = 0;
   uint32_t idx = 0;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;
   int data_offset = 0;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   started = bson_get_monotonic_time ();

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   if (!command->n_documents) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 +
                      strlen (collection) + 1);

   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);
   while ((bson = bson_reader_read (reader, &eof))) {
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx <= command->n_documents);

      if (bson->len > max_bson_obj_size) {
         /* document is too large */
         _mongoc_write_command_too_large_error (
            error, idx, bson->len, max_bson_obj_size);

         data_offset += bson->len;

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) ||
                 size > (max_msg_size - bson->len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
         iov[n_docs_in_batch].iov_len = bson->len;
         size += bson->len;
         n_docs_in_batch++;
         data_offset += bson->len;
      }

      idx++;
   }
   bson_reader_destroy (reader);

   if (n_docs_in_batch) {
      request_id = ++client->cluster.request_id;

      rpc.header.msg_len = 0;
      rpc.header.request_id = request_id;
      rpc.header.response_to = 0;
      rpc.header.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags =
         ((command->flags.ordered) ? MONGOC_INSERT_NONE
                                   : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      _mongoc_monitor_legacy_write (
         client, command, database, collection, server_stream, request_id);

      if (!mongoc_cluster_legacy_rpc_sendv_to_server (
             &client->cluster, &rpc, server_stream, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      _mongoc_monitor_legacy_write_succeeded (client,
                                              bson_get_monotonic_time () -
                                                 started,
                                              command,
                                              server_stream,
                                              request_id);

      started = bson_get_monotonic_time ();
   }

cleanup:

   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
Code Example #23
File: bson-validate.c  Project: HeliumProject/libbson
int
main (int   argc,
      char *argv[])
{
   bson_reader_t *reader;
   const bson_t *b;
   const char *filename;
   size_t offset;
   int docnum;
   int fd;
   int i;

   /*
    * Print program usage if no arguments are provided.
    */
   if (argc == 1) {
      fprintf(stderr, "usage: %s FILE...\n", argv[0]);
      return 1;
   }

   /*
    * Process command line arguments expecting each to be a filename.
    */
   for (i = 1; i < argc; i++) {
      filename = argv[i];
      docnum = 0;

      /*
       * Open the filename provided in command line arguments.
       */
      errno = 0;
      fd = open(filename, O_RDONLY);
      if (fd == -1) {
         fprintf(stderr, "Failed to open %s: %s\n", filename, strerror(errno));
         continue;
      }

      /*
       * Initialize a new reader for this file descriptor.
       */
      reader = bson_reader_new_from_fd(fd, TRUE);

      /*
       * Validate each incoming document, reporting the first invalid one.
       */
      while ((b = bson_reader_read(reader, NULL))) {
         docnum++;
         if (!bson_validate(b,
                            (BSON_VALIDATE_UTF8 |
                             BSON_VALIDATE_DOLLAR_KEYS |
                             BSON_VALIDATE_DOT_KEYS |
                             BSON_VALIDATE_UTF8_ALLOW_NULL),
                            &offset)) {
            fprintf(stderr,
                    "Document %d in \"%s\" is invalid at offset %d.\n",
                    docnum, filename, (int)offset);
            return 1;
         }
      }

      /*
       * Cleanup after our reader, which closes the file descriptor.
       */
      bson_reader_destroy(reader);
   }

   return 0;
}
Code Example #24
File: bson-metrics.c  Project: acmorrow/libbson
int
main (int   argc,
      char *argv[])
{
   bson_reader_t *reader;
   const bson_t *b;
   bson_error_t error;
   const char *filename;
   int i, j;
   double dtime_before, dtime_after, dtime_delta;
   uint64_t aggregate_count;
   off_t mark;

   /*
    * Print program usage if no arguments are provided.
    */
   if (argc == 1) {
      fprintf(stderr, "usage: %s FILE...\n", argv[0]);
      return 1;
   }

   /*
    * Process command line arguments expecting each to be a filename.
    */
   printf("[");
   for (i = 1; i < argc; i++) {
      if (i > 1) printf(",");
      filename = argv[i];

      /*
       * Initialize a new reader for this file descriptor.
       */
      if (!(reader = bson_reader_new_from_file (filename, &error))) {
         fprintf (stderr, "Failed to open \"%s\": %s\n",
                  filename, error.message);
         continue;
      }

      state = initial_state;
      dtime_before = dtimeofday();
      mark = 0;
      while ((b = bson_reader_read (reader, NULL))) {
        off_t pos = bson_reader_tell(reader);
        state.doc_size_max = MAX(pos - mark, state.doc_size_max);
        mark = pos;
        bson_metrics(b, NULL, &state);
      }
      dtime_after = dtimeofday();
      dtime_delta = MAX(dtime_after - dtime_before, 0.000001);
      state.bson_type_metrics[BSON_TYPE_MAXKEY].description = "Max key";
      state.bson_type_metrics[BSON_TYPE_MINKEY].description = "Min key";
      aggregate_count = state.bson_type_metrics[BSON_TYPE_DOCUMENT].count + state.bson_type_metrics[BSON_TYPE_ARRAY].count;
      qsort(state.bson_type_metrics, 256, sizeof(bson_type_metrics_t), compar_bson_type_metrics);

      printf("\n  {\n");
      printf("    \"file\": \"%s\",\n", filename);
      printf("    \"secs\": %.2f,\n", dtime_delta);
      printf("    \"docs_per_sec\": %"PRIu64",\n", (uint64_t)round(state.doc_count/dtime_delta));
      printf("    \"docs\": %"PRIu64",\n", state.doc_count);
      printf("    \"elements\": %"PRIu64",\n", state.element_count);
      printf("    \"elements_per_doc\": %"PRIu64",\n", (uint64_t)round((double)state.element_count/(double)MAX(state.doc_count, 1)));
      printf("    \"aggregates\": %"PRIu64",\n", aggregate_count);
      printf("    \"aggregates_per_doc\": %"PRIu64",\n", (uint64_t)round((double)aggregate_count/(double)MAX(state.doc_count, 1)));
      printf("    \"degree\": %"PRIu64",\n", (uint64_t)round((double)state.element_count/((double)MAX(state.doc_count + aggregate_count, 1))));
      printf("    \"doc_size_max\": %"PRIu64",\n", state.doc_size_max);
      printf("    \"doc_size_average\": %"PRIu64",\n", (uint64_t)round((double)bson_reader_tell(reader)/(double)MAX(state.doc_count, 1)));
      printf("    \"key_size_average\": %"PRIu64",\n", (uint64_t)round((double)state.key_size_tally/(double)MAX(state.element_count, 1)));
      printf("    \"string_size_average\": %"PRIu64",\n", (uint64_t)round((double)state.utf8_size_tally/(double)MAX(state.bson_type_metrics[BSON_TYPE_UTF8].count, 1)));
      printf("    \"percent_by_type\": {\n");
      for (j = 0; state.bson_type_metrics[j].count > 0; j++) {
        bson_type_metrics_t bson_type_metrics = state.bson_type_metrics[j];
        printf("      \"%s\": %"PRIu64",\n", bson_type_metrics.description, (uint64_t)round((double)bson_type_metrics.count*100.0/(double)MAX(state.element_count, 1)));
      }
      printf("    }\n");
      printf("  }");

      /*
       * Cleanup after our reader, which closes the file descriptor.
       */
      bson_reader_destroy (reader);
   }
   printf("\n]\n");

   return 0;
}
Code Example #25
File: mongoc-write-command.c  Project: cran/mongolite
static void
_mongoc_write_opquery (mongoc_write_command_t *command,
                       mongoc_client_t *client,
                       mongoc_server_stream_t *server_stream,
                       const char *database,
                       const char *collection,
                       const mongoc_write_concern_t *write_concern,
                       uint32_t offset,
                       mongoc_write_result_t *result,
                       bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   const char *key;
   uint32_t len = 0;
   bson_t ar;
   bson_t cmd;
   bson_t reply;
   char str[16];
   bool has_more;
   bool ret = false;
   uint32_t i;
   int32_t max_bson_obj_size;
   int32_t max_write_batch_size;
   uint32_t overhead;
   uint32_t key_len;
   int data_offset = 0;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   bson_init (&cmd);
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_write_batch_size =
      mongoc_server_stream_max_write_batch_size (server_stream);

again:
   has_more = false;
   i = 0;

   _mongoc_write_command_init (&cmd, command, collection);

   /* 1 byte to specify array type, 1 byte for field name's null terminator */
   overhead = cmd.len + 2 + gCommandFieldLens[command->type];


   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);

   bson_append_array_begin (&cmd,
                            gCommandFields[command->type],
                            gCommandFieldLens[command->type],
                            &ar);

   while ((bson = bson_reader_read (reader, &eof))) {
      key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str);
      len = bson->len;
      /* 1 byte to specify document type, 1 byte for key's null terminator */
      if (_mongoc_write_command_will_overflow (overhead,
                                               key_len + len + 2 + ar.len,
                                               i,
                                               max_bson_obj_size,
                                               max_write_batch_size)) {
         has_more = true;
         break;
      }
      BSON_APPEND_DOCUMENT (&ar, key, bson);
      data_offset += len;
      i++;
   }

   bson_append_array_end (&cmd, &ar);

   if (!i) {
      _mongoc_write_command_too_large_error (error, i, len, max_bson_obj_size);
      result->failed = true;
      result->must_stop = true;
      ret = false;
      if (bson) {
         data_offset += len;
      }
   } else {
      mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd);
      parts.is_write_command = true;
      parts.assembled.operation_id = command->operation_id;
      if (!mongoc_cmd_parts_set_write_concern (
             &parts,
             write_concern,
             server_stream->sd->max_wire_version,
             error)) {
         bson_reader_destroy (reader);
         bson_destroy (&cmd);
         mongoc_cmd_parts_cleanup (&parts);
         EXIT;
      }

      BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts));
      if (!mongoc_cmd_parts_append_opts (
             &parts, &iter, server_stream->sd->max_wire_version, error)) {
         bson_reader_destroy (reader);
         bson_destroy (&cmd);
         mongoc_cmd_parts_cleanup (&parts);
         EXIT;
      }

      ret = mongoc_cmd_parts_assemble (&parts, server_stream, error);
      if (ret) {
         ret = mongoc_cluster_run_command_monitored (
            &client->cluster, &parts.assembled, &reply, error);
      } else {
         /* assembling failed */
         result->must_stop = true;
         bson_init (&reply);
      }

      if (!ret) {
         result->failed = true;
         if (bson_empty (&reply)) {
            /* assembling failed, or a network error running the command */
            result->must_stop = true;
         }
      }

      _mongoc_write_result_merge (result, command, &reply, offset);
      offset += i;
      bson_destroy (&reply);
      mongoc_cmd_parts_cleanup (&parts);
   }
   bson_reader_destroy (reader);

   if (has_more && (ret || !command->flags.ordered) && !result->must_stop) {
      bson_reinit (&cmd);
      GOTO (again);
   }

   bson_destroy (&cmd);
   EXIT;
}
Code Example #26
File: bson-to-json.c  Project: HeliumProject/libbson
int
main (int   argc,
      char *argv[])
{
   bson_reader_t *reader;
   const bson_t *b;
   const char *filename;
   char *str;
   int fd;
   int i;

   /*
    * Print program usage if no arguments are provided.
    */
   if (argc == 1) {
      fprintf(stderr, "usage: %s FILE...\n", argv[0]);
      return 1;
   }

   /*
    * Process command line arguments expecting each to be a filename.
    */
   for (i = 1; i < argc; i++) {
      filename = argv[i];

      /*
       * Open the filename provided in command line arguments.
       */
      if (0 == strcmp(filename, "-")) {
         fd = STDIN_FILENO;
      } else {
         errno = 0;
         fd = open(filename, O_RDONLY);
         if (fd == -1) {
            fprintf(stderr, "Failed to open %s: %s\n", filename, strerror(errno));
            continue;
         }
      }

      /*
       * Initialize a new reader for this file descriptor.
       */
      reader = bson_reader_new_from_fd(fd, TRUE);

      /*
       * Convert each incoming document to JSON and print to stdout.
       */
      while ((b = bson_reader_read(reader, NULL))) {
         str = bson_as_json(b, NULL);
         fprintf(stdout, "%s\n", str);
         bson_free(str);
      }

      /*
       * Cleanup after our reader, which closes the file descriptor.
       */
      bson_reader_destroy(reader);
   }

   return 0;
}
Code Example #27
File: mongoc-cursor.c  Project: docee/mongo-c-driver
static bool
_mongoc_cursor_query (mongoc_cursor_t *cursor)
{
   mongoc_rpc_t rpc;
   uint32_t hint;
   uint32_t request_id;

   ENTRY;

   bson_return_val_if_fail (cursor, false);

   if (!_mongoc_client_warm_up (cursor->client, &cursor->error)) {
      cursor->failed = true;
      RETURN (false);
   }

   rpc.query.msg_len = 0;
   rpc.query.request_id = 0;
   rpc.query.response_to = 0;
   rpc.query.opcode = MONGOC_OPCODE_QUERY;
   rpc.query.flags = cursor->flags;
   rpc.query.collection = cursor->ns;
   rpc.query.skip = cursor->skip;
   if ((cursor->flags & MONGOC_QUERY_TAILABLE_CURSOR)) {
      rpc.query.n_return = 0;
   } else {
      rpc.query.n_return = _mongoc_n_return(cursor);
   }
   rpc.query.query = bson_get_data(&cursor->query);

   if (cursor->has_fields) {
      rpc.query.fields = bson_get_data (&cursor->fields);
   } else {
      rpc.query.fields = NULL;
   }

   if (!(hint = _mongoc_client_sendv (cursor->client, &rpc, 1,
                                      cursor->hint, NULL,
                                      cursor->read_prefs,
                                      &cursor->error))) {
      GOTO (failure);
   }

   cursor->hint = hint;
   request_id = BSON_UINT32_FROM_LE(rpc.header.request_id);

   _mongoc_buffer_clear(&cursor->buffer, false);

   if (!_mongoc_client_recv(cursor->client,
                            &cursor->rpc,
                            &cursor->buffer,
                            hint,
                            &cursor->error)) {
      GOTO (failure);
   }

   if (cursor->rpc.header.opcode != MONGOC_OPCODE_REPLY) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_PROTOCOL,
                      MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                      "Invalid opcode. Expected %d, got %d.",
                      MONGOC_OPCODE_REPLY, cursor->rpc.header.opcode);
      GOTO (failure);
   }

   if (cursor->rpc.header.response_to != request_id) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_PROTOCOL,
                      MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
                      "Invalid response_to. Expected %d, got %d.",
                      request_id, cursor->rpc.header.response_to);
      GOTO (failure);
   }

   if (_mongoc_cursor_unwrap_failure(cursor)) {
      if ((cursor->error.domain == MONGOC_ERROR_QUERY) &&
          (cursor->error.code == MONGOC_ERROR_QUERY_NOT_TAILABLE)) {
         cursor->failed = true;
      }
      GOTO (failure);
   }

   if (cursor->reader) {
      bson_reader_destroy(cursor->reader);
   }

   cursor->reader = bson_reader_new_from_data(cursor->rpc.reply.documents,
                                              cursor->rpc.reply.documents_len);

   if ((cursor->flags & MONGOC_QUERY_EXHAUST)) {
      cursor->in_exhaust = true;
      cursor->client->in_exhaust = true;
   }

   cursor->done = false;
   cursor->end_of_event = false;
   cursor->sent = true;

   RETURN (true);

failure:
   cursor->failed = true;
   cursor->done = true;

   RETURN (false);
}