Example #1
static mongoc_cursor_t *
query_collection (mongoc_collection_t *collection,
                  uint32_t        last_time)
{
   mongoc_cursor_t *cursor;
   bson_t query;
   bson_t gt;

   BSON_ASSERT(collection);

   bson_init(&query);
   bson_append_document_begin(&query, "ts", 2, &gt);
   bson_append_timestamp(&gt, "$gt", 3, last_time, 0);
   bson_append_document_end(&query, &gt);

   cursor = mongoc_collection_find(collection,
                                   (MONGOC_QUERY_TAILABLE_CURSOR |
                                    MONGOC_QUERY_AWAIT_DATA |
                                    MONGOC_QUERY_SLAVE_OK),
                                   0,
                                   0,
                                   0,
                                   &query,
                                   NULL,
                                   NULL);

   bson_destroy(&query);

   return cursor;
}
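For orientation, here is a minimal sketch of the begin/end pairing and the bson_uint32_to_string array-key helper that the snippets below rely on. It is not taken from any of the examples on this page; build_items is a hypothetical helper, and the code assumes only the public libbson API.

/* Hypothetical helper (not from the examples): appends
 * "items": [ { "index": 0 }, { "index": 1 }, ... ] to an already
 * initialized document, using the same begin/end pairing shown
 * throughout this page. */
#include <bson.h>

static void
build_items (bson_t *doc, uint32_t count)
{
   bson_t array;
   bson_t child;
   const char *key;
   char buf[16];
   uint32_t i;

   /* doc must already be initialized (bson_init or BSON_INITIALIZER) */
   bson_append_array_begin (doc, "items", -1, &array);

   for (i = 0; i < count; i++) {
      /* BSON array element keys are the decimal strings "0", "1", ... */
      bson_uint32_to_string (i, &key, buf, sizeof buf);
      bson_append_document_begin (&array, key, -1, &child);
      bson_append_int32 (&child, "index", -1, (int32_t) i);
      bson_append_document_end (&array, &child);
   }

   bson_append_array_end (doc, &array);
}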
Example #2
static int mongo_del_origin(u08bits *origin)
{
  mongoc_collection_t * collection = mongo_get_collection("realm"); 

  if(!collection)
    return -1;
    
  int ret = -1;
  
  bson_t query, doc, child;
  bson_init(&query);
  bson_init(&doc);
  bson_append_document_begin(&doc, "$pull", -1, &child);
  BSON_APPEND_UTF8(&child, "origin", (const char *)origin);
  bson_append_document_end(&doc, &child);

  if (!mongoc_collection_update(collection, MONGOC_UPDATE_MULTI_UPDATE, &query, &doc, NULL, NULL)) {
    TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Error deleting origin information\n");
  } else {
    ret = 0;
  }
  mongoc_collection_destroy(collection);
  bson_destroy(&query);
  bson_destroy(&doc);
  return ret;
}
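Example #3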
static bool append_value(bson_t* bson, const char* key, size_t length, object_t* value) {
    switch (value->type) {
        case type_nil:    bson_append_null(bson, key, length);                       break; 
        case type_bool:   bson_append_bool(bson, key, length, value->b);             break; 
        case type_double: bson_append_double(bson, key, length, value->d);           break; 
        case type_str:    bson_append_utf8(bson, key, length, value->str, value->l); break; 

        case type_int:    append_int(bson, key, length, value->i);                   break; 
        case type_uint:   append_int(bson, key, length, (int64_t)value->u);          break;

        case type_map: {
            bson_t child;
            bson_append_document_begin(bson, key, length, &child);
            append_document(&child, value);
            bson_append_document_end(bson, &child);
        } break;

        case type_array: {
            bson_t child;
            bson_append_array_begin(bson, key, length, &child);
            append_array(&child, value);
            bson_append_array_end(bson, &child);
        } break;

        default:
            return false;
    }
    return true;
}
Example #4
static int mongo_add_origin(u08bits *origin, u08bits *realm)
{
	mongoc_collection_t * collection = mongo_get_collection("realm");

	if(!collection)
		return -1;
    
	int ret = -1;

	u08bits realm0[STUN_MAX_REALM_SIZE+1] = "\0";
	if(!realm) realm=realm0;
  
	bson_t query, doc, child;
	bson_init(&query);
	BSON_APPEND_UTF8(&query, "realm", (const char *)realm);
	bson_init(&doc);
	bson_append_document_begin(&doc, "$addToSet", -1, &child);
	BSON_APPEND_UTF8(&child, "origin", (const char *)origin);
	bson_append_document_end(&doc, &child);

	if (!mongoc_collection_update(collection, MONGOC_UPDATE_UPSERT, &query, &doc, NULL, NULL)) {
		TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Error inserting/updating realm origin information\n");
	} else {
		ret = 0;
	}
	mongoc_collection_destroy(collection);
	bson_destroy(&query);
	bson_destroy(&doc);
	return ret;
}
Example #5
/* check  version */
static gboolean 
sim_parser_connect_test4 (void)
{
  bson_t *bson_connect = bson_new ();
  bson_t child;
  SimParser *parser = NULL;
  SimCommand *cmd = NULL;
  gboolean result = FALSE;
  uint8_t uuid[]={0x07,0x92,0xd6,0x72,0xf4,0xce,0x11,0xe4,0x9d,0xe2,0x00,0x0c,0x29,0xd9,0x46,0xde};
  bson_append_document_begin (bson_connect,"connect", -1, &child);
  BSON_APPEND_UTF8 (&child, "id", "bad id");
  BSON_APPEND_INT32 (&child, "type", SIM_SESSION_TYPE_WEB);
  BSON_APPEND_UTF8 (&child, "version", "x.x.x");
  if (bson_append_binary (&child, "sensor_id", -1, BSON_SUBTYPE_UUID, uuid, 16) == FALSE)
    return FALSE;
  bson_append_document_end (bson_connect, &child);
  do{
    if ((parser = sim_parser_new()) == NULL)
    { 
      result = FALSE;
      break;
    }
    if ((cmd = sim_parser_bson (parser, bson_get_data (bson_connect), bson_connect->len)) != NULL)
    {
      result = FALSE;
      break;
    }
    result = TRUE;
  } while (0);

  return result;
}
Example #6
static void
test_bson_build_child (void)
{
   bson_t b;
   bson_t child;
   bson_t *b2;
   bson_t *child2;

   bson_init(&b);
   assert(bson_append_document_begin(&b, "foo", -1, &child));
   assert(bson_append_utf8(&child, "bar", -1, "baz", -1));
   assert(bson_append_document_end(&b, &child));

   b2 = bson_new();
   child2 = bson_new();
   assert(bson_append_utf8(child2, "bar", -1, "baz", -1));
   assert(bson_append_document(b2, "foo", -1, child2));
   bson_destroy(child2);

   assert(b.len == b2->len);
   assert_bson_equal(&b, b2);

   bson_destroy(&b);
   bson_destroy(b2);
}
Example #7
static void
append_write_err (bson_t *doc,
                  uint32_t code,
                  const char *errmsg,
                  size_t errmsg_len,
                  const bson_t *errinfo)
{
   bson_t array = BSON_INITIALIZER;
   bson_t child;

   BSON_ASSERT (errmsg);

   /* writeErrors: [{index: 0, code: code, errmsg: errmsg, errInfo: {...}}] */
   bson_append_document_begin (&array, "0", 1, &child);
   bson_append_int32 (&child, "index", 5, 0);
   bson_append_int32 (&child, "code", 4, (int32_t) code);
   bson_append_utf8 (&child, "errmsg", 6, errmsg, (int) errmsg_len);
   if (errinfo) {
      bson_append_document (&child, "errInfo", 7, errinfo);
   }

   bson_append_document_end (&array, &child);
   bson_append_array (doc, "writeErrors", 11, &array);

   bson_destroy (&array);
}
Example #8
void _aggregate_recurse_fill(bson_iter_t *iter, bson_t* new_doc, bson_t* existing_aggregate_doc, bson_t* merged_aggregate_doc, const char *key) {
    bson_iter_t child_iter;
    bson_t child_doc;

    while (bson_iter_next (iter)) {
        int new_key_length = strlen(bson_iter_key(iter));
        if (strcmp("", key) != 0) {
            new_key_length += strlen(key) + 1;
        }

        /* +1 for the NUL terminator written by strcpy/strcat below */
        char new_key[new_key_length + 1];
        if (strcmp("", key) == 0) {
            strcpy(new_key, bson_iter_key(iter));
        } else {
            strcpy(new_key, key);
            strcat(new_key, ".");
            strcat(new_key, bson_iter_key(iter));
        }

        if (strcmp("_id", new_key) == 0) {
            bson_value_t *existing_id = _aggregate_get_value_at_key(existing_aggregate_doc, "_id");
            bson_append_value(merged_aggregate_doc, "_id", -1, existing_id);
            continue;
        }

        if (BSON_ITER_HOLDS_DOCUMENT (iter)) {

            const char *agg_key = NULL;
            const bson_value_t *agg_field = NULL;

            if (bson_iter_recurse (iter, &child_iter)) {
                if (bson_iter_next (&child_iter) &&
                    _aggregate_is_agg_operator(bson_iter_key(&child_iter))) {
                    agg_key = bson_iter_key(&child_iter);
                    agg_field = bson_iter_value(&child_iter);
                }

                if (agg_key && !bson_iter_next (&child_iter)) {
                    bson_value_t *existing_value = _aggregate_get_value_at_key(existing_aggregate_doc, new_key);
                    bson_value_t *new_doc_value = _aggregate_get_value_at_key(new_doc, (*agg_field).value.v_utf8.str + 1);
                    bson_value_t * agg_result = _aggregate(existing_value, new_doc_value, agg_key);
                    bson_append_value(merged_aggregate_doc, bson_iter_key(iter), -1, agg_result);
                    continue;

                }
            }

            bson_append_document_begin (merged_aggregate_doc, bson_iter_key(iter), -1, &child_doc);

            if (bson_iter_recurse (iter, &child_iter)) {
                _aggregate_recurse_fill (&child_iter, new_doc, existing_aggregate_doc, &child_doc, new_key);
            }

            bson_append_document_end (merged_aggregate_doc, &child_doc);
        } else {
            bson_append_value(merged_aggregate_doc, bson_iter_key(iter), -1, bson_iter_value(iter));
        }
    }
}
Example #9
 frame(bson_t* parent, const char* key, std::size_t len, bool is_array)
     : n(0), is_array(is_array), parent(parent) {
     if (is_array) {
         bson_append_array_begin(parent, key, len, &bson);
     } else {
         bson_append_document_begin(parent, key, len, &bson);
     }
 }
Example #10
static int mongo_set_permission_ip(const char *kind, u08bits *realm, const char* ip, int del)
{
	char sub_collection_name[129];
	snprintf(sub_collection_name,sizeof(sub_collection_name)-1,"%s_peer_ip",kind);

	mongoc_collection_t * collection = mongo_get_collection("realm");

	if(!collection)
		return -1;

	int ret = -1;

	u08bits realm0[STUN_MAX_REALM_SIZE+1] = "\0";
	if(!realm) realm=realm0;

	bson_t query, doc, child;
	bson_init(&query);
	BSON_APPEND_UTF8(&query, "realm", (const char *)realm);
	bson_init(&doc);
	if(del) {
		bson_append_document_begin(&doc, "$pull", -1, &child);
	} else {
		bson_append_document_begin(&doc, "$addToSet", -1, &child);
	}
	BSON_APPEND_UTF8(&child, sub_collection_name, (const char *)ip);
	bson_append_document_end(&doc, &child);

	mongoc_update_flags_t flags = MONGOC_UPDATE_NONE;

	if(del) {
		flags = MONGOC_UPDATE_MULTI_UPDATE;
	} else {
		flags = MONGOC_UPDATE_UPSERT;
	}

	if (!mongoc_collection_update(collection, flags, &query, &doc, NULL, NULL)) {
		TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Error inserting permission ip information\n");
	} else {
		ret = 0;
	}
	mongoc_collection_destroy(collection);
	bson_destroy(&query);
	bson_destroy(&doc);
	return ret;
}
Example #11
static int mongo_set_realm_option_one(u08bits *realm, unsigned long value, const char* opt) {
  mongoc_collection_t * collection = mongo_get_collection("realm");

  if(!collection)
    return -1;

  bson_t query, doc, child;
  bson_init(&query);
  BSON_APPEND_UTF8(&query, "realm", (const char *)realm);
  bson_init(&doc);
  
  size_t klen = 9 + strlen(opt);
  char * _k = (char *)turn_malloc(klen);
  strcpy(_k, "options.");
  strcat(_k, opt);
  
  if (value > 0) {
    bson_append_document_begin(&doc, "$set", -1, &child);
    BSON_APPEND_INT32(&child, _k, (int32_t)value);
    bson_append_document_end(&doc, &child);
  } else {
    bson_append_document_begin(&doc, "$unset", -1, &child);
    BSON_APPEND_INT32(&child, _k, 1);
    bson_append_document_end(&doc, &child);
  }
  turn_free(_k,klen);
  
  int ret = -1;
  
  if (!mongoc_collection_update(collection, MONGOC_UPDATE_MULTI_UPDATE, &query, &doc, NULL, NULL)) {
    TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Error deleting origin information\n");
  } else {
    ret = 0;
  }
  mongoc_collection_destroy(collection);
  bson_destroy(&query);
  bson_destroy(&doc);
  return ret;
}
Example #12
int32_t
_mongoc_write_result_merge_arrays (uint32_t offset,
                                   mongoc_write_result_t *result, /* IN */
                                   bson_t *dest,                  /* IN */
                                   bson_iter_t *iter)             /* IN */
{
   const bson_value_t *value;
   bson_iter_t ar;
   bson_iter_t citer;
   int32_t idx;
   int32_t count = 0;
   int32_t aridx;
   bson_t child;
   const char *keyptr = NULL;
   char key[12];
   int len;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (dest);
   BSON_ASSERT (iter);
   BSON_ASSERT (BSON_ITER_HOLDS_ARRAY (iter));

   aridx = bson_count_keys (dest);

   if (bson_iter_recurse (iter, &ar)) {
      while (bson_iter_next (&ar)) {
         if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
             bson_iter_recurse (&ar, &citer)) {
            len =
               (int) bson_uint32_to_string (aridx++, &keyptr, key, sizeof key);
            bson_append_document_begin (dest, keyptr, len, &child);
            while (bson_iter_next (&citer)) {
               if (BSON_ITER_IS_KEY (&citer, "index")) {
                  idx = bson_iter_int32 (&citer) + offset;
                  BSON_APPEND_INT32 (&child, "index", idx);
               } else {
                  value = bson_iter_value (&citer);
                  BSON_APPEND_VALUE (&child, bson_iter_key (&citer), value);
               }
            }
            bson_append_document_end (dest, &child);
            count++;
         }
      }
   }

   RETURN (count);
}
Example #13
static void
append_write_concern_err (bson_t *doc, const char *errmsg, size_t errmsg_len)
{
   bson_t array = BSON_INITIALIZER;
   bson_t child;
   bson_t errinfo;

   BSON_ASSERT (errmsg);

   /* writeConcernErrors: [{code: 64,
    *                       errmsg: errmsg,
    *                       errInfo: {wtimeout: true}}] */
   bson_append_document_begin (&array, "0", 1, &child);
   bson_append_int32 (&child, "code", 4, 64);
   bson_append_utf8 (&child, "errmsg", 6, errmsg, (int) errmsg_len);
   bson_append_document_begin (&child, "errInfo", 7, &errinfo);
   bson_append_bool (&errinfo, "wtimeout", 8, true);
   bson_append_document_end (&child, &errinfo);
   bson_append_document_end (&array, &child);
   bson_append_array (doc, "writeConcernErrors", 18, &array);

   bson_destroy (&array);
}
Example #14
static void
append_upserted (bson_t *doc, const bson_value_t *upserted_id)
{
   bson_t array = BSON_INITIALIZER;
   bson_t child;

   /* append upserted: [{index: 0, _id: upserted_id}]*/
   bson_append_document_begin (&array, "0", 1, &child);
   bson_append_int32 (&child, "index", 5, 0);
   bson_append_value (&child, "_id", 3, upserted_id);
   bson_append_document_end (&array, &child);

   bson_append_array (doc, "upserted", 8, &array);

   bson_destroy (&array);
}
Example #15
static int
_bson_json_read_map_key (void          *_ctx, /* IN */
                         const uint8_t *val,  /* IN */
                         size_t         len)  /* IN */
{
   bson_json_reader_t *reader = (bson_json_reader_t *)_ctx;
   bson_json_reader_bson_t *bson = &reader->bson;

   if (bson->read_state == BSON_JSON_IN_START_MAP) {
      if (len > 0 && val[0] == '$' && _is_known_key ((const char *)val)) {
         bson->read_state = BSON_JSON_IN_BSON_TYPE;
         bson->bson_type = (bson_type_t) 0;
         memset (&bson->bson_type_data, 0, sizeof bson->bson_type_data);
      } else {
         bson->read_state = BSON_JSON_REGULAR;
         STACK_PUSH_DOC (bson_append_document_begin (STACK_BSON_PARENT,
                                                     bson->key,
                                                     (int)bson->key_buf.len,
                                                     STACK_BSON_CHILD));
      }
   }

   if (bson->read_state == BSON_JSON_IN_BSON_TYPE) {
      if HANDLE_OPTION ("$regex", BSON_TYPE_REGEX, BSON_JSON_LF_REGEX) else
      if HANDLE_OPTION ("$options", BSON_TYPE_REGEX, BSON_JSON_LF_OPTIONS) else
      if HANDLE_OPTION ("$oid", BSON_TYPE_OID, BSON_JSON_LF_OID) else
      if HANDLE_OPTION ("$binary", BSON_TYPE_BINARY, BSON_JSON_LF_BINARY) else
      if HANDLE_OPTION ("$type", BSON_TYPE_BINARY, BSON_JSON_LF_TYPE) else
      if HANDLE_OPTION ("$date", BSON_TYPE_DATE_TIME, BSON_JSON_LF_DATE) else
      if HANDLE_OPTION ("$ref", BSON_TYPE_DBPOINTER, BSON_JSON_LF_REF) else
      if HANDLE_OPTION ("$id", BSON_TYPE_DBPOINTER, BSON_JSON_LF_ID) else
      if HANDLE_OPTION ("$undefined", BSON_TYPE_UNDEFINED,
                        BSON_JSON_LF_UNDEFINED) else
      if HANDLE_OPTION ("$minKey", BSON_TYPE_MINKEY, BSON_JSON_LF_MINKEY) else
      if HANDLE_OPTION ("$maxKey", BSON_TYPE_MAXKEY, BSON_JSON_LF_MAXKEY) else
      if HANDLE_OPTION ("$numberLong", BSON_TYPE_INT64, BSON_JSON_LF_INT64) else
      if (len == strlen ("$timestamp") &&
          memcmp (val, "$timestamp", len) == 0) {
         bson->bson_type = BSON_TYPE_TIMESTAMP;
         bson->read_state = BSON_JSON_IN_BSON_TYPE_TIMESTAMP_STARTMAP;
      } else {
         _bson_json_read_set_error (reader,
                                    "Invalid key %s.  Looking for values for %d",
                                    val, bson->bson_type);
         return 0;
      }
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_VALUES) {
Example #16
void arrayToBSON(const Array& value, const char* key, bson_t* bson) {
  bson_t child;
  bool isDocument = arrayIsDocument(value);
  if (isDocument) {
    bson_append_document_begin(bson, key, -1, &child);
  } else {
    bson_append_array_begin(bson, key, -1, &child);
  }

  fillBSONWithArray(value, &child);

  if (isDocument) {
    bson_append_document_end(bson, &child);
  } else {
    bson_append_array_end(bson, &child);
  }
}
Example #17
void
_mongoc_write_result_append_upsert (mongoc_write_result_t *result,
                                    int32_t idx,
                                    const bson_value_t *value)
{
   bson_t child;
   const char *keyptr = NULL;
   char key[12];
   int len;

   BSON_ASSERT (result);
   BSON_ASSERT (value);

   len = (int) bson_uint32_to_string (
      result->upsert_append_count, &keyptr, key, sizeof key);

   bson_append_document_begin (&result->upserted, keyptr, len, &child);
   BSON_APPEND_INT32 (&child, "index", idx);
   BSON_APPEND_VALUE (&child, "_id", value);
   bson_append_document_end (&result->upserted, &child);

   result->upsert_append_count++;
}
Example #18
static void
_append_write_err_legacy (mongoc_write_result_t *result,
                          const char *err,
                          mongoc_error_domain_t domain,
                          int32_t code,
                          uint32_t offset)
{
   bson_t holder, write_errors, child;
   bson_iter_t iter;

   BSON_ASSERT (code > 0);

   if (!result->error.domain) {
      bson_set_error (&result->error, domain, (uint32_t) code, "%s", err);
   }

   /* stop processing, if result->ordered */
   result->failed = true;

   bson_init (&holder);
   bson_append_array_begin (&holder, "0", 1, &write_errors);
   bson_append_document_begin (&write_errors, "0", 1, &child);

   /* set error's "index" to 0; fixed up in _mongoc_write_result_merge_arrays */
   bson_append_int32 (&child, "index", 5, 0);
   bson_append_int32 (&child, "code", 4, code);
   bson_append_utf8 (&child, "errmsg", 6, err, -1);
   bson_append_document_end (&write_errors, &child);
   bson_append_array_end (&holder, &write_errors);
   bson_iter_init (&iter, &holder);
   bson_iter_next (&iter);

   _mongoc_write_result_merge_arrays (
      offset, result, &result->writeErrors, &iter);

   bson_destroy (&holder);
}
Example #19
static void
test_bson_build_child_deep_1 (bson_t *b,
                              int    *count)
{
   bson_t child;

   (*count)++;

   assert(bson_append_document_begin(b, "b", -1, &child));
   assert(!(b->flags & BSON_FLAG_INLINE));
   assert((b->flags & BSON_FLAG_IN_CHILD));
   assert(!(child.flags & BSON_FLAG_INLINE));
   assert((child.flags & BSON_FLAG_STATIC));
   assert((child.flags & BSON_FLAG_CHILD));

   if (*count < 100) {
      test_bson_build_child_deep_1(&child, count);
   } else {
      assert(bson_append_int32(&child, "b", -1, 1234));
   }

   assert(bson_append_document_end(b, &child));
   assert(!(b->flags & BSON_FLAG_IN_CHILD));
}
Example #20
static void
_append_write_concern_err_legacy (mongoc_write_result_t *result,
                                  const char *err,
                                  int32_t code)
{
   char str[16];
   const char *key;
   size_t keylen;
   bson_t write_concern_error;

   /* don't set result->failed; record the write concern err and continue */
   keylen = bson_uint32_to_string (
      result->n_writeConcernErrors, &key, str, sizeof str);

   BSON_ASSERT (keylen < INT_MAX);

   bson_append_document_begin (
      &result->writeConcernErrors, key, (int) keylen, &write_concern_error);

   bson_append_int32 (&write_concern_error, "code", 4, code);
   bson_append_utf8 (&write_concern_error, "errmsg", 6, err, -1);
   bson_append_document_end (&result->writeConcernErrors, &write_concern_error);
   result->n_writeConcernErrors++;
}
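Example #21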
void
_mongoc_write_result_merge_legacy (mongoc_write_result_t  *result,  /* IN */
                                   mongoc_write_command_t *command, /* IN */
                                   const bson_t           *reply,   /* IN */
                                   mongoc_error_code_t     default_code,
                                   uint32_t                offset)
{
   const bson_value_t *value;
   bson_t holder, write_errors, child;
   bson_iter_t iter;
   bson_iter_t ar;
   bson_iter_t citer;
   const char *err = NULL;
   int32_t code = 0;
   int32_t n = 0;
   int32_t upsert_idx = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      n = bson_iter_int32 (&iter);
   }

   if (bson_iter_init_find (&iter, reply, "err") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      err = bson_iter_utf8 (&iter, NULL);
   }

   if (bson_iter_init_find (&iter, reply, "code") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      code = bson_iter_int32 (&iter);
   }

   if (code || err) {
      if (!code) {
         code = default_code;
      }

      if (!err) {
         err = "unknown error";
      }

      bson_set_error (&result->error,
                      MONGOC_ERROR_COLLECTION,
                      code,
                      "%s", err);
      result->failed = true;

      bson_init(&holder);
      bson_append_array_begin(&holder, "0", 1, &write_errors);
      bson_append_document_begin(&write_errors, "0", 1, &child);
      bson_append_int32(&child, "index", 5, 0);
      bson_append_int32(&child, "code", 4, code);
      bson_append_utf8(&child, "errmsg", 6, err, -1);
      bson_append_document_end(&write_errors, &child);
      bson_append_array_end(&holder, &write_errors);
      bson_iter_init(&iter, &holder);
      bson_iter_next(&iter);

      _mongoc_write_result_merge_arrays (offset, result, &result->writeErrors,
                                         &iter);

      bson_destroy(&holder);
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      if (n) {
         result->nInserted += n;
      }
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += n;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:
      if (bson_iter_init_find (&iter, reply, "upserted") &&
         !BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;
         value = bson_iter_value (&iter);
         _mongoc_write_result_append_upsert (result, offset, value);
      } else if (bson_iter_init_find (&iter, reply, "upserted") &&
                 BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;
         if (bson_iter_recurse (&iter, &ar)) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "_id")) {
                  value = bson_iter_value (&citer);
                  _mongoc_write_result_append_upsert (result,
                                                      offset + upsert_idx,
                                                      value);
                  upsert_idx++;
               }
            }
         }
      } else if ((n == 1) &&
                 bson_iter_init_find (&iter, reply, "updatedExisting") &&
                 BSON_ITER_HOLDS_BOOL (&iter) &&
                 !bson_iter_bool (&iter)) {
         result->nUpserted += n;
      } else {
         result->nMatched += n;
      }
      break;
   default:
      break;
   }

   result->omit_nModified = true;

   EXIT;
}
Example #22
static void
ha_replica_set_configure (ha_replica_set_t *replica_set,
                          ha_node_t        *primary)
{
   mongoc_database_t *database;
   mongoc_client_t *client;
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bson_error_t error;
   bson_iter_t iter;
   ha_node_t *node;
   bson_t ar;
   bson_t cmd;
   bson_t config;
   bson_t member;
   char *str;
   char *uristr;
   char hoststr[32];
   char key[8];
   int i = 0;

   uristr = bson_strdup_printf("mongodb://127.0.0.1:%hu/", primary->port);
   client = mongoc_client_new(uristr);
#ifdef MONGOC_ENABLE_SSL
   if (replica_set->ssl_opt) {
      mongoc_client_set_ssl_opts(client, replica_set->ssl_opt);
   }
#endif
   bson_free(uristr);

   bson_init(&cmd);
   bson_append_document_begin(&cmd, "replSetInitiate", -1, &config);
   bson_append_utf8(&config, "_id", 3, replica_set->name, -1);
   bson_append_array_begin(&config, "members", -1, &ar);
   for (node = replica_set->nodes; node; node = node->next) {
      snprintf(key, sizeof key, "%u", i);
      key[sizeof key - 1] = '\0';

      snprintf(hoststr, sizeof hoststr, "127.0.0.1:%hu", node->port);
      hoststr[sizeof hoststr - 1] = '\0';

      bson_append_document_begin(&ar, key, -1, &member);
      bson_append_int32(&member, "_id", -1, i);
      bson_append_utf8(&member, "host", -1, hoststr, -1);
      bson_append_bool(&member, "arbiterOnly", -1, node->is_arbiter);
      bson_append_document_end(&ar, &member);

      i++;
   }
   bson_append_array_end(&config, &ar);
   bson_append_document_end(&cmd, &config);

   str = bson_as_json(&cmd, NULL);
   MONGOC_DEBUG("Config: %s", str);
   bson_free(str);

   database = mongoc_client_get_database(client, "admin");

again:
   cursor = mongoc_database_command(database,
                                    MONGOC_QUERY_NONE,
                                    0,
                                    1,
                                    &cmd,
                                    NULL,
                                    NULL);

   while (mongoc_cursor_next(cursor, &doc)) {
      str = bson_as_json(doc, NULL);
      MONGOC_DEBUG("Reply: %s", str);
      bson_free(str);
      if (bson_iter_init_find(&iter, doc, "ok") &&
          bson_iter_as_bool(&iter)) {
         goto cleanup;
      }
   }

   if (mongoc_cursor_error(cursor, &error)) {
      mongoc_cursor_destroy(cursor);
      MONGOC_WARNING("%s: Retrying in 1 second.", error.message);
      sleep(1);
      goto again;
   }

cleanup:
   mongoc_cursor_destroy(cursor);
   mongoc_database_destroy(database);
   mongoc_client_destroy(client);
   bson_destroy(&cmd);
}
Example #23
mongoc_cursor_t *
_mongoc_cursor_new (mongoc_client_t           *client,
                    const char                *db_and_collection,
                    mongoc_query_flags_t       flags,
                    bson_uint32_t              skip,
                    bson_uint32_t              limit,
                    bson_uint32_t              batch_size,
                    bson_bool_t                is_command,
                    const bson_t              *query,
                    const bson_t              *fields,
                    const mongoc_read_prefs_t *read_prefs)
{
   mongoc_read_mode_t mode;
   mongoc_cursor_t *cursor;
   const bson_t *tags;
   const char *mode_str;
   bson_t child;

   ENTRY;

   BSON_ASSERT(client);
   BSON_ASSERT(db_and_collection);
   BSON_ASSERT(query);

   /* we can't have exhaust queries with limits */
   BSON_ASSERT (!((flags & MONGOC_QUERY_EXHAUST) && limit));

   /* we can't have exhaust queries with sharded clusters */
   BSON_ASSERT (!((flags & MONGOC_QUERY_EXHAUST) && client->cluster.isdbgrid));

   /*
    * Cursors execute their query lazily. This sadly means that we must copy
    * some extra data around between the bson_t structures. This should be
    * small in most cases, so it reduces to a pure memcpy. The benefit to this
    * design is simplified error handling by API consumers.
    */

   cursor = bson_malloc0(sizeof *cursor);
   cursor->client = client;
   strncpy(cursor->ns, db_and_collection, sizeof cursor->ns - 1);
   cursor->nslen = strlen(cursor->ns);
   cursor->flags = flags;
   cursor->skip = skip;
   cursor->limit = limit;
   cursor->batch_size = batch_size;

   cursor->is_command = is_command;

   if (!bson_has_field (query, "$query")) {
      bson_init (&cursor->query);
      bson_append_document (&cursor->query, "$query", 6, query);
   } else {
      bson_copy_to (query, &cursor->query);
   }

   if (read_prefs) {
      cursor->read_prefs = mongoc_read_prefs_copy (read_prefs);

      mode = mongoc_read_prefs_get_mode (read_prefs);
      tags = mongoc_read_prefs_get_tags (read_prefs);

      if (mode != MONGOC_READ_PRIMARY) {
         flags |= MONGOC_QUERY_SLAVE_OK;

         if ((mode != MONGOC_READ_SECONDARY_PREFERRED) || tags) {
            bson_append_document_begin (&cursor->query, "$readPreference",
                                        15, &child);
            mode_str = _mongoc_cursor_get_read_mode_string (mode);
            bson_append_utf8 (&child, "mode", 4, mode_str, -1);
            if (tags) {
               bson_append_array (&child, "tags", 4, tags);
            }
            bson_append_document_end (&cursor->query, &child);
         }
      }
   }

   if (fields) {
      bson_copy_to(fields, &cursor->fields);
   } else {
      bson_init(&cursor->fields);
   }

   _mongoc_buffer_init(&cursor->buffer, NULL, 0, NULL);

   mongoc_counter_cursors_active_inc();

   RETURN(cursor);
}
Example #24
void VariantToBsonConverter::convertDocument(bson_t *bson, const char *property_name, Variant v)
{
	bson_t child;
	int unmangle = 0;
	Array document;

	/* if we are not at a top-level, we need to check (and convert) special
	 * BSON types too */
	if (v.isObject()) {
		if (convertSpecialObject(bson, property_name, v.toObject())) {
			return;
		}
		/* The "convertSpecialObject" method didn't understand this type, so we
		 * will continue treating this as a normal document */
	}

	document = v.toObject()->o_toIterArray(null_string, ObjectData::PreserveRefs);

	if (_isPackedArray(document) && !v.isObject()) {
		if (property_name != NULL) {
			bson_append_array_begin(bson, property_name, -1, &child);
		}
	} else {
		unmangle = 1;
		if (property_name != NULL) {
			bson_append_document_begin(bson, property_name, -1, &child);
		}
	}

	for (ArrayIter iter(document); iter; ++iter) {
		Variant key(iter.first());
		const Variant& data(iter.secondRef());
		String s_key = key.toString();

		if (m_level == 0 && (m_flags & HIPPO_BSON_ADD_ID)) {
			/* If we have an ID, we don't need to add it. But we also need to
			 * set m_out to the value! */
			if (strncmp(s_key.c_str(), "_id", s_key.length()) == 0) {
				m_flags &= ~HIPPO_BSON_ADD_ID;
				if (m_flags & HIPPO_BSON_RETURN_ID) {
					/* FIXME: Should we add a ref here? */
					m_out = data;
				}
			}
		}

		m_level++;
		if (unmangle) {
			const char *unmangledName;

			unmangledName = _getUnmangledPropertyName(s_key);
			convertElement(property_name != NULL ? &child : bson, unmangledName, data);
			free((void*) unmangledName);
		} else {
			convertElement(property_name != NULL ? &child : bson, s_key.c_str(), data);
		}
		m_level--;
	}

	if (m_level == 0 && (m_flags & HIPPO_BSON_ADD_ID)) {
		bson_oid_t oid;

		bson_oid_init(&oid, NULL);
		bson_append_oid(bson, "_id", strlen("_id"), &oid);

		if (m_flags & HIPPO_BSON_RETURN_ID) {
			static Class* c_objectId;
			c_objectId = Unit::lookupClass(s_MongoBsonObjectID_className.get());
			assert(c_objectId);
			Object obj = Object{c_objectId};

			MongoDBBsonObjectIDData* obj_data = Native::data<MongoDBBsonObjectIDData>(obj.get());
			bson_oid_copy(&oid, &obj_data->m_oid);

			m_out = obj;
		}
	}

	if (property_name != NULL) {
		/* mirror the begin condition above so array_begin is always paired
		 * with array_end and document_begin with document_end */
		if (_isPackedArray(document) && !v.isObject()) {
			bson_append_array_end(bson, &child);
		} else {
			bson_append_document_end(bson, &child);
		}
	}
}
Example #25
/*
 * Add key-op-value to a bson filter document
 */
int db_mongodb_bson_filter_add(bson_t *doc, const db_key_t* _k, const db_op_t* _op,
		const db_val_t* _v, int idx)
{
	bson_t mdoc;
	db_key_t tkey;
	const db_val_t *tval;
	int vtype;
	str ocmp;

	tkey = _k[idx];
	tval = _v + idx;
	vtype = VAL_TYPE(tval);

	/* OP_EQ is handled separately */
	if(!strcmp(_op[idx], OP_LT)) {
		ocmp.s = "$lt";
		ocmp.len = 3;
	} else if(!strcmp(_op[idx], OP_LEQ)) {
		ocmp.s = "$lte";
		ocmp.len = 4;
	} else if(!strcmp(_op[idx], OP_GT)) {
		ocmp.s = "$gt";
		ocmp.len = 3;
	} else if(!strcmp(_op[idx], OP_GEQ)) {
		ocmp.s = "$gte";
		ocmp.len = 4;
	} else if(!strcmp(_op[idx], OP_NEQ)
			|| !strcmp(_op[idx], "!=")) {
		ocmp.s = "$ne";
		ocmp.len = 3;
	} else {
		LM_ERR("unsuported match operator: %s\n", _op[idx]);
		goto error;
	}

	if(!bson_append_document_begin(doc, tkey->s, tkey->len, &mdoc)) {
		LM_ERR("failed to append start to bson doc %.*s %s ... [%d]\n",
					tkey->len, tkey->s, ocmp.s, idx);
		goto error;
	}

	if(VAL_NULL(tval)) {
		if(!bson_append_null(&mdoc, ocmp.s, ocmp.len)) {
			LM_ERR("failed to append null to bson doc %.*s %s null [%d]\n",
					tkey->len, tkey->s, ocmp.s, idx);
			goto error;
		}
		goto done;
	}
	switch(vtype) {
		case DB1_INT:
			if(!bson_append_int32(&mdoc, ocmp.s, ocmp.len,
						VAL_INT(tval))) {
				LM_ERR("failed to append int to bson doc %.*s %s %d [%d]\n",
						tkey->len, tkey->s, ocmp.s, VAL_INT(tval), idx);
				goto error;
			}
			break;

		case DB1_BIGINT:
			if(!bson_append_int64(&mdoc, ocmp.s, ocmp.len,
						VAL_BIGINT(tval ))) {
				LM_ERR("failed to append bigint to bson doc %.*s %s %lld [%d]\n",
						tkey->len, tkey->s, ocmp.s, VAL_BIGINT(tval), idx);
				goto error;
			}
			break;

		case DB1_DOUBLE:
			if(!bson_append_double(&mdoc, ocmp.s, ocmp.len,
						VAL_DOUBLE(tval))) {
				LM_ERR("failed to append double to bson doc %.*s %s %f [%d]\n",
						tkey->len, tkey->s, ocmp.s, VAL_DOUBLE(tval), idx);
				goto error;
			}
			break;

		case DB1_STRING:
			if(!bson_append_utf8(&mdoc, ocmp.s, ocmp.len,
						VAL_STRING(tval), strlen(VAL_STRING(tval))) ) {
				LM_ERR("failed to append string to bson doc %.*s %s %s [%d]\n",
						tkey->len, tkey->s, ocmp.s, VAL_STRING(tval), idx);
				goto error;
			}
			break;

		case DB1_STR:

			if(!bson_append_utf8(&mdoc, ocmp.s, ocmp.len,
						VAL_STR(tval).s, VAL_STR(tval).len) ) {
				LM_ERR("failed to append str to bson doc %.*s %s %.*s [%d]\n",
						tkey->len, tkey->s, ocmp.s, VAL_STR(tval).len, VAL_STR(tval).s, idx);
				goto error;
			}
			break;

		case DB1_DATETIME:
			if(!bson_append_time_t(&mdoc, ocmp.s, ocmp.len,
						VAL_TIME(tval))) {
				LM_ERR("failed to append time to bson doc %.*s %s %ld [%d]\n",
						tkey->len, tkey->s, ocmp.s, VAL_TIME(tval), idx);
				goto error;
			}
			break;

		case DB1_BLOB:
			if(!bson_append_binary(&mdoc, ocmp.s, ocmp.len,
						BSON_SUBTYPE_BINARY,
						(const uint8_t *)VAL_BLOB(tval).s, VAL_BLOB(tval).len) ) {
				LM_ERR("failed to append blob to bson doc %.*s %s [bin] [%d]\n",
						tkey->len, tkey->s, ocmp.s, idx);
				goto error;
			}
			break;

		case DB1_BITMAP:
			if(!bson_append_int32(&mdoc, ocmp.s, ocmp.len,
						VAL_INT(tval))) {
				LM_ERR("failed to append bitmap to bson doc %.*s %s %d [%d]\n",
						tkey->len, tkey->s, ocmp.s, VAL_INT(tval), idx);
				goto error;
			}
			break;

		default:
			LM_ERR("val type [%d] not supported\n", vtype);
			goto error;
	}

done:
	if(!bson_append_document_end(doc, &mdoc)) {
		LM_ERR("failed to append end to bson doc %.*s %s ... [%d]\n",
					tkey->len, tkey->s, ocmp.s, idx);
		goto error;
	}
	return 0;
error:
	return -1;
}
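Example #26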
static void
test_list (void)
{
   mongoc_gridfs_t *gridfs;
   mongoc_gridfs_file_t *file;
   mongoc_client_t *client;
   bson_error_t error;
   mongoc_gridfs_file_list_t *list;
   mongoc_gridfs_file_opt_t opt = { 0 };
   bson_t query, child;
   char buf[100];
   int i = 0;

   client = mongoc_client_new (gTestUri);
   assert (client);

   gridfs = get_test_gridfs (client, "list", &error);
   assert (gridfs);

   mongoc_gridfs_drop (gridfs, &error);

   for (i = 0; i < 3; i++) {
      bson_snprintf (buf, sizeof buf, "file.%d", i);
      opt.filename = buf;
      file = mongoc_gridfs_create_file (gridfs, &opt);
      assert (file);
      assert (mongoc_gridfs_file_save (file));
      mongoc_gridfs_file_destroy (file);
   }

   bson_init (&query);
   bson_append_document_begin (&query, "$orderby", -1, &child);
   bson_append_int32 (&child, "filename", -1, 1);
   bson_append_document_end (&query, &child);
   bson_append_document_begin (&query, "$query", -1, &child);
   bson_append_document_end (&query, &child);

   list = mongoc_gridfs_find (gridfs, &query);

   bson_destroy (&query);

   i = 0;
   while ((file = mongoc_gridfs_file_list_next (list))) {
      bson_snprintf (buf, sizeof buf, "file.%d", i++);

      assert (strcmp (mongoc_gridfs_file_get_filename (file), buf) == 0);

      mongoc_gridfs_file_destroy (file);
   }
   assert(i == 3);
   mongoc_gridfs_file_list_destroy (list);

   bson_init (&query);
   bson_append_utf8 (&query, "filename", -1, "file.1", -1);
   file = mongoc_gridfs_find_one (gridfs, &query, &error);
   assert (file);
   assert (strcmp (mongoc_gridfs_file_get_filename (file), "file.1") == 0);
   mongoc_gridfs_file_destroy (file);

   file = mongoc_gridfs_find_one_by_filename (gridfs, "file.1", &error);
   assert (file);
   assert (strcmp (mongoc_gridfs_file_get_filename (file), "file.1") == 0);
   mongoc_gridfs_file_destroy (file);

   drop_collections (gridfs, &error);
   mongoc_gridfs_destroy (gridfs);

   mongoc_client_destroy (client);
}
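Example #27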
/** save a gridfs file */
bool
mongoc_gridfs_file_save (mongoc_gridfs_file_t *file)
{
   bson_t *selector, *update, child;
   const char *md5;
   const char *filename;
   const char *content_type;
   const bson_t *aliases;
   const bson_t *metadata;
   bool r;

   ENTRY;

   if (!file->is_dirty) {
      return 1;
   }

   if (file->page && _mongoc_gridfs_file_page_is_dirty (file->page)) {
      _mongoc_gridfs_file_flush_page (file);
   }

   md5 = mongoc_gridfs_file_get_md5 (file);
   filename = mongoc_gridfs_file_get_filename (file);
   content_type = mongoc_gridfs_file_get_content_type (file);
   aliases = mongoc_gridfs_file_get_aliases (file);
   metadata = mongoc_gridfs_file_get_metadata (file);

   selector = bson_new ();
   bson_append_value (selector, "_id", -1, &file->files_id);

   update = bson_new ();
   bson_append_document_begin (update, "$set", -1, &child);
   bson_append_int64 (&child, "length", -1, file->length);
   bson_append_int32 (&child, "chunkSize", -1, file->chunk_size);
   bson_append_date_time (&child, "uploadDate", -1, file->upload_date);

   if (md5) {
      bson_append_utf8 (&child, "md5", -1, md5, -1);
   }

   if (filename) {
      bson_append_utf8 (&child, "filename", -1, filename, -1);
   }

   if (content_type) {
      bson_append_utf8 (&child, "contentType", -1, content_type, -1);
   }

   if (aliases) {
      bson_append_array (&child, "aliases", -1, aliases);
   }

   if (metadata) {
      bson_append_document (&child, "metadata", -1, metadata);
   }

   bson_append_document_end (update, &child);

   r = mongoc_collection_update (file->gridfs->files, MONGOC_UPDATE_UPSERT,
                                 selector, update, NULL, &file->error);

   file->failed = !r;


   bson_destroy (selector);
   bson_destroy (update);

   file->is_dirty = 0;

   RETURN (r);
}
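Example #28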
/** refresh a gridfs file's underlying page
 *
 * This unconditionally fetches the current page, even if the current page
 * covers the same theoretical chunk.
 */
static bool
_mongoc_gridfs_file_refresh_page (mongoc_gridfs_file_t *file)
{
   bson_t *query, *fields, child, child2;
   const bson_t *chunk;
   const char *key;
   bson_iter_t iter;

   uint32_t n;
   const uint8_t *data;
   uint32_t len;

   ENTRY;

   BSON_ASSERT (file);

   n = (uint32_t)(file->pos / file->chunk_size);

   if (file->page) {
      _mongoc_gridfs_file_page_destroy (file->page);
      file->page = NULL;
   }

   /* if the file pointer is past the end of the current file (I.e. pointing to
    * a new chunk) and we're on a chunk boundary, we'll pass the page
    * constructor a new empty page */
   if ((int64_t)file->pos >= file->length && !(file->pos % file->chunk_size)) {
      data = (uint8_t *)"";
      len = 0;
   } else {
      /* if we have a cursor, but the cursor doesn't have the chunk we're going
       * to need, destroy it (we'll grab a new one immediately there after) */
      if (file->cursor &&
          !(file->cursor_range[0] <= n && n <= file->cursor_range[1])) {
         mongoc_cursor_destroy (file->cursor);
         file->cursor = NULL;
      }

      if (!file->cursor) {
         query = bson_new ();

         bson_append_document_begin(query, "$query", -1, &child);
            bson_append_value (&child, "files_id", -1, &file->files_id);

            bson_append_document_begin (&child, "n", -1, &child2);
               bson_append_int32 (&child2, "$gte", -1, (int32_t)(file->pos / file->chunk_size));
            bson_append_document_end (&child, &child2);
         bson_append_document_end(query, &child);

         bson_append_document_begin(query, "$orderby", -1, &child);
            bson_append_int32 (&child, "n", -1, 1);
         bson_append_document_end(query, &child);

         fields = bson_new ();
         bson_append_int32 (fields, "n", -1, 1);
         bson_append_int32 (fields, "data", -1, 1);
         bson_append_int32 (fields, "_id", -1, 0);

         /* find all chunks greater than or equal to our current file pos */
         file->cursor = mongoc_collection_find (file->gridfs->chunks,
                                                MONGOC_QUERY_NONE, 0, 0, 0, query,
                                                fields, NULL);

         file->cursor_range[0] = n;
         file->cursor_range[1] = (uint32_t)(file->length / file->chunk_size);

         bson_destroy (query);
         bson_destroy (fields);

         BSON_ASSERT (file->cursor);
      }

      /* we might have had a cursor before, then seeked ahead past a chunk.
       * iterate until we're on the right chunk */
      while (file->cursor_range[0] <= n) {
         if (!mongoc_cursor_next (file->cursor, &chunk)) {
            if (file->cursor->failed) {
               memcpy (&(file->error), &(file->cursor->error),
                       sizeof (bson_error_t));
               file->failed = true;
            }

            RETURN (0);
         }

         file->cursor_range[0]++;
      }

      bson_iter_init (&iter, chunk);

      /* grab out what we need from the chunk */
      while (bson_iter_next (&iter)) {
         key = bson_iter_key (&iter);

         if (strcmp (key, "n") == 0) {
            n = bson_iter_int32 (&iter);
         } else if (strcmp (key, "data") == 0) {
            bson_iter_binary (&iter, NULL, &len, &data);
         } else {
            RETURN (0);
         }
      }

      /* we're on the wrong chunk somehow... probably because our gridfs is
       * missing chunks.
       *
       * TODO: maybe we should make more noise here?
       */

      if (!(n == file->pos / file->chunk_size)) {
         return 0;
      }
   }

   file->page = _mongoc_gridfs_file_page_new (data, len, file->chunk_size);

   /* seek in the page towards wherever we're supposed to be */
   RETURN (_mongoc_gridfs_file_page_seek (file->page, file->pos %
                                         file->chunk_size));
}
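Example #29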
/* Update result with the read prefs, following Server Selection Spec.
 * The driver must have discovered the server is a mongos.
 */
static void
_apply_read_preferences_mongos (const mongoc_read_prefs_t *read_prefs,
                                const bson_t *query_bson,
                                mongoc_apply_read_prefs_result_t *result /* OUT */)
{
   mongoc_read_mode_t mode;
   const bson_t *tags = NULL;
   bson_t child;
   const char *mode_str;

   mode = mongoc_read_prefs_get_mode (read_prefs);
   if (read_prefs) {
      tags = mongoc_read_prefs_get_tags (read_prefs);
   }

   /* Server Selection Spec says:
    *
    * For mode 'primary', drivers MUST NOT set the slaveOK wire protocol flag
    *   and MUST NOT use $readPreference
    *
    * For mode 'secondary', drivers MUST set the slaveOK wire protocol flag and
    *   MUST also use $readPreference
    *
    * For mode 'primaryPreferred', drivers MUST set the slaveOK wire protocol
    *   flag and MUST also use $readPreference
    *
    * For mode 'secondaryPreferred', drivers MUST set the slaveOK wire protocol
    *   flag. If the read preference contains a non-empty tag_sets parameter,
    *   drivers MUST use $readPreference; otherwise, drivers MUST NOT use
    *   $readPreference
    *
    * For mode 'nearest', drivers MUST set the slaveOK wire protocol flag and
    *   MUST also use $readPreference
    */
   if (mode == MONGOC_READ_SECONDARY_PREFERRED && bson_empty0 (tags)) {
      result->flags |= MONGOC_QUERY_SLAVE_OK;

   } else if (mode != MONGOC_READ_PRIMARY) {
      result->flags |= MONGOC_QUERY_SLAVE_OK;

      /* Server Selection Spec: "When any $ modifier is used, including the
       * $readPreference modifier, the query MUST be provided using the $query
       * modifier".
       *
       * This applies to commands, too.
       */
      result->query_with_read_prefs = bson_new ();
      result->query_owned = true;

      if (bson_has_field (query_bson, "$query")) {
         bson_concat (result->query_with_read_prefs, query_bson);
      } else {
         bson_append_document (result->query_with_read_prefs,
                               "$query", 6, query_bson);
      }

      bson_append_document_begin (result->query_with_read_prefs,
                                  "$readPreference", 15, &child);
      mode_str = _get_read_mode_string (mode);
      bson_append_utf8 (&child, "mode", 4, mode_str, -1);
      if (!bson_empty0 (tags)) {
         bson_append_array (&child, "tags", 4, tags);
      }

      bson_append_document_end (result->query_with_read_prefs, &child);
   }
}
Example #30
mongoc_cursor_t *
_mongoc_cursor_new (mongoc_client_t           *client,
                    const char                *db_and_collection,
                    mongoc_query_flags_t       flags,
                    uint32_t                   skip,
                    uint32_t                   limit,
                    uint32_t                   batch_size,
                    bool                       is_command,
                    const bson_t              *query,
                    const bson_t              *fields,
                    const mongoc_read_prefs_t *read_prefs)
{
   mongoc_read_prefs_t *local_read_prefs = NULL;
   mongoc_read_mode_t mode;
   mongoc_cursor_t *cursor;
   const bson_t *tags;
   bson_iter_t iter;
   const char *key;
   const char *mode_str;
   bson_t child;
   bool found = false;
   int i;

   ENTRY;

   BSON_ASSERT (client);
   BSON_ASSERT (db_and_collection);
   BSON_ASSERT (query);

   if (!read_prefs) {
      read_prefs = client->read_prefs;
   }

   cursor = bson_malloc0 (sizeof *cursor);

   /*
    * CDRIVER-244:
    *
    * If this is a command, we need to verify we can send it to the location
    * specified by the read preferences. Otherwise, log a warning that we
    * are rerouting to the primary instance.
    */
   if (is_command &&
       read_prefs &&
       (mongoc_read_prefs_get_mode (read_prefs) != MONGOC_READ_PRIMARY) &&
       bson_iter_init (&iter, query) &&
       bson_iter_next (&iter) &&
       (key = bson_iter_key (&iter))) {
      for (i = 0; gSecondaryOkCommands [i]; i++) {
         if (0 == strcasecmp (key, gSecondaryOkCommands [i])) {
            found = true;
            break;
         }
      }
      if (!found) {
         cursor->redir_primary = true;
         local_read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
         read_prefs = local_read_prefs;
         MONGOC_INFO ("Database command \"%s\" rerouted to primary node", key);
      }
   }

   /*
    * Cursors execute their query lazily. This sadly means that we must copy
    * some extra data around between the bson_t structures. This should be
    * small in most cases, so it reduces to a pure memcpy. The benefit to this
    * design is simplified error handling by API consumers.
    */

   cursor->client = client;
   bson_strncpy (cursor->ns, db_and_collection, sizeof cursor->ns);
   cursor->nslen = (uint32_t)strlen(cursor->ns);
   cursor->flags = flags;
   cursor->skip = skip;
   cursor->limit = limit;
   cursor->batch_size = batch_size;
   cursor->is_command = is_command;

#define MARK_FAILED(c) \
   do { \
      (c)->failed = true; \
      (c)->done = true; \
      (c)->end_of_event = true; \
      (c)->sent = true; \
   } while (0)

   /* we can't have exhaust queries with limits */
   if ((flags & MONGOC_QUERY_EXHAUST) && limit) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_CURSOR,
                      MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                      "Cannot specify MONGOC_QUERY_EXHAUST and set a limit.");
      MARK_FAILED (cursor);
      GOTO (finish);
   }

   /* we can't have exhaust queries with sharded clusters */
   if ((flags & MONGOC_QUERY_EXHAUST) &&
       (client->cluster.mode == MONGOC_CLUSTER_SHARDED_CLUSTER)) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_CURSOR,
                      MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                      "Cannot specify MONGOC_QUERY_EXHAUST with sharded cluster.");
      MARK_FAILED (cursor);
      GOTO (finish);
   }

   /*
    * Check types of various optional parameters.
    */
   if (!is_command) {
      if (bson_iter_init_find (&iter, query, "$explain") &&
          !(BSON_ITER_HOLDS_BOOL (&iter) || BSON_ITER_HOLDS_INT32 (&iter))) {
         bson_set_error (&cursor->error,
                         MONGOC_ERROR_CURSOR,
                         MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                         "$explain must be a boolean.");
         MARK_FAILED (cursor);
         GOTO (finish);
      }

      if (bson_iter_init_find (&iter, query, "$snapshot") &&
          !BSON_ITER_HOLDS_BOOL (&iter) &&
          !BSON_ITER_HOLDS_INT32 (&iter)) {
         bson_set_error (&cursor->error,
                         MONGOC_ERROR_CURSOR,
                         MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                         "$snapshot must be a boolean.");
         MARK_FAILED (cursor);
         GOTO (finish);
      }
   }

   if (!cursor->is_command && !bson_has_field (query, "$query")) {
      bson_init (&cursor->query);
      bson_append_document (&cursor->query, "$query", 6, query);
   } else {
      bson_copy_to (query, &cursor->query);
   }

   if (read_prefs) {
      cursor->read_prefs = mongoc_read_prefs_copy (read_prefs);

      mode = mongoc_read_prefs_get_mode (read_prefs);
      tags = mongoc_read_prefs_get_tags (read_prefs);

      if (mode != MONGOC_READ_PRIMARY) {
         flags |= MONGOC_QUERY_SLAVE_OK;

         if ((mode != MONGOC_READ_SECONDARY_PREFERRED) || tags) {
            bson_append_document_begin (&cursor->query, "$readPreference",
                                        15, &child);
            mode_str = _mongoc_cursor_get_read_mode_string (mode);
            bson_append_utf8 (&child, "mode", 4, mode_str, -1);
            if (tags) {
               bson_append_array (&child, "tags", 4, tags);
            }
            bson_append_document_end (&cursor->query, &child);
         }
      }
   }

   if (fields) {
      bson_copy_to(fields, &cursor->fields);
   } else {
      bson_init(&cursor->fields);
   }

   _mongoc_buffer_init(&cursor->buffer, NULL, 0, NULL);

finish:
   mongoc_counter_cursors_active_inc();

   if (local_read_prefs) {
      mongoc_read_prefs_destroy (local_read_prefs);
   }

   RETURN (cursor);
}