static void
test_bson_copy_to_excluding (void)
{
   bson_iter_t iter;
   bson_bool_t r;
   bson_t b;
   bson_t c;
   int i;

   bson_init(&b);
   bson_append_int32(&b, "a", 1, 1);
   bson_append_int32(&b, "b", 1, 2);

   bson_copy_to_excluding(&b, &c, "b", NULL);

   r = bson_iter_init_find(&iter, &c, "a");
   assert(r);
   r = bson_iter_init_find(&iter, &c, "b");
   assert(!r);

   i = bson_count_keys(&b);
   assert_cmpint(i, ==, 2);
   i = bson_count_keys(&c);
   assert_cmpint(i, ==, 1);

   bson_destroy(&b);
   bson_destroy(&c);
}
/*
 * If error is not set, set code from first document in array like
 * [{"code": 64, "errmsg": "duplicate"}, ...]. Format the error message
 * from all errors in array.
 */
static void
_set_error_from_response (bson_t *bson_array,
                          mongoc_error_domain_t domain,
                          const char *error_type,
                          bson_error_t *error /* OUT */)
{
   bson_iter_t array_iter;
   bson_iter_t doc_iter;
   bson_string_t *compound_err;
   const char *errmsg = NULL;
   int32_t code = 0;
   uint32_t n_keys, i;

   compound_err = bson_string_new (NULL);
   n_keys = bson_count_keys (bson_array);
   if (n_keys > 1) {
      bson_string_append_printf (
         compound_err, "Multiple %s errors: ", error_type);
   }

   if (!bson_empty0 (bson_array) && bson_iter_init (&array_iter, bson_array)) {
      /* get first code and all error messages */
      i = 0;

      while (bson_iter_next (&array_iter)) {
         if (BSON_ITER_HOLDS_DOCUMENT (&array_iter) &&
             bson_iter_recurse (&array_iter, &doc_iter)) {
            /* parse doc, which is like {"code": 64, "errmsg": "duplicate"} */
            while (bson_iter_next (&doc_iter)) {
               /* use the first error code we find */
               if (BSON_ITER_IS_KEY (&doc_iter, "code") && code == 0) {
                  code = bson_iter_int32 (&doc_iter);
               } else if (BSON_ITER_IS_KEY (&doc_iter, "errmsg")) {
                  errmsg = bson_iter_utf8 (&doc_iter, NULL);

                  /* build message like 'Multiple write errors: "foo", "bar"' */
                  if (n_keys > 1) {
                     bson_string_append_printf (compound_err, "\"%s\"", errmsg);
                     if (i < n_keys - 1) {
                        bson_string_append (compound_err, ", ");
                     }
                  } else {
                     /* single error message */
                     bson_string_append (compound_err, errmsg);
                  }
               }
            }

            i++;
         }
      }

      if (code && compound_err->len) {
         bson_set_error (
            error, domain, (uint32_t) code, "%s", compound_err->str);
      }
   }

   bson_string_free (compound_err, true);
}
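The array layout described in the comment above can be built with BCON; the following is a minimal sketch, assuming the static helper is called from the same translation unit, with the real MONGOC_ERROR_COMMAND domain and an illustrative "write" error-type label (requires the mongoc/bson headers).

/* Minimal sketch (assumed caller): build an error array shaped like
 * [{"code": 64, "errmsg": "duplicate"}, ...] and let the helper flatten it. */
bson_error_t error = {0};
bson_t *errors = BCON_NEW ("0", "{", "code", BCON_INT32 (64),
                                     "errmsg", BCON_UTF8 ("duplicate"), "}",
                           "1", "{", "code", BCON_INT32 (11000),
                                     "errmsg", BCON_UTF8 ("dup key"), "}");

_set_error_from_response (errors, MONGOC_ERROR_COMMAND, "write", &error);
/* error.code is now 64 and error.message reads:
 *   Multiple write errors: "duplicate", "dup key" */
bson_destroy (errors);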
static int
_score_tags (const bson_t *read_tags,
             const bson_t *node_tags)
{
   uint32_t len;
   bson_iter_t iter;
   const char *key;
   const char *str;
   int count;
   int i;

   bson_return_val_if_fail(read_tags, -1);
   bson_return_val_if_fail(node_tags, -1);

   count = bson_count_keys(read_tags);

   if (!bson_empty(read_tags) && bson_iter_init(&iter, read_tags)) {
      for (i = count; bson_iter_next(&iter); i--) {
         if (BSON_ITER_HOLDS_UTF8(&iter)) {
            key = bson_iter_key(&iter);
            str = bson_iter_utf8(&iter, &len);
            if (_contains_tag(node_tags, key, str, len)) {
               return count;
            }
         }
      }
      return -1;
   }

   return 0;
}
static void
mongoc_uri_parse_read_prefs (mongoc_uri_t *uri,
                             const char   *str)
{
   const char *end_keyval;
   const char *end_key;
   bson_t b;
   char *keyval;
   char *key;
   char keystr[32];
   int i;

   bson_init(&b);

again:
   if ((keyval = scan_to_unichar(str, ',', &end_keyval))) {
      if ((key = scan_to_unichar(keyval, ':', &end_key))) {
         bson_append_utf8(&b, key, -1, end_key + 1, -1);
         bson_free(key);
      }
      bson_free(keyval);
      str = end_keyval + 1;
      goto again;
   } else {
      if ((key = scan_to_unichar(str, ':', &end_key))) {
         bson_append_utf8(&b, key, -1, end_key + 1, -1);
         bson_free(key);
      }
   }

   i = bson_count_keys(&uri->read_prefs);
   snprintf(keystr, sizeof keystr, "%u", i);
   bson_append_document(&uri->read_prefs, keystr, -1, &b);
   bson_destroy(&b);
}
stdx::optional<bsoncxx::document::view> read_preference::tags() const {
    const bson_t* bson_tags = libmongoc::read_prefs_get_tags(_impl->read_preference_t);

    if (bson_count_keys(bson_tags))
        return bsoncxx::document::view(bson_get_data(bson_tags), bson_tags->len);

    return stdx::optional<bsoncxx::document::view>{};
}
static int
_score_tags (const bson_t *read_tags,
             const bson_t *node_tags)
{
   uint32_t len;
   bson_iter_t iter;
   bson_iter_t sub_iter;
   const char *key;
   const char *str;
   int count;
   bool node_matches_set;

   bson_return_val_if_fail(read_tags, -1);
   bson_return_val_if_fail(node_tags, -1);

   count = bson_count_keys(read_tags);

   /* Execute this block if read tags were provided, else bail and return 0 (all nodes equal) */
   if (!bson_empty(read_tags) && bson_iter_init(&iter, read_tags)) {

      /*
       * Iterate over array of read tag sets provided (each element is a tag set)
       * Tag sets are provided in order of preference so return the count of the
       * first set that matches the node or -1 if no set matched the node.
       */
      while (count && bson_iter_next(&iter)) {
         if (BSON_ITER_HOLDS_DOCUMENT(&iter) && bson_iter_recurse(&iter, &sub_iter)) {
            node_matches_set = true;

            /* Iterate over the key/value pairs (tags) in the current set */
            while (bson_iter_next(&sub_iter) && BSON_ITER_HOLDS_UTF8(&sub_iter)) {
               key = bson_iter_key(&sub_iter);
               str = bson_iter_utf8(&sub_iter, &len);

               /* If any of the tags do not match, this node cannot satisfy this tag set. */
               if (!_contains_tag(node_tags, key, str, len)) {
                  node_matches_set = false;
                  break;
               }
            }

            /* This set matched, return the count as the score */
            if (node_matches_set) {
               return count;
            }

            /* Decrement the score and try to match the next set. */
            count--;
         }
      }
      return -1;
   }

   return 0;
}
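The tag-set array this scorer walks is an ordinary BSON array of documents, most preferred set first. A minimal sketch of building one with BCON follows; the data-centre/rack keys and node tags are purely illustrative, and the call assumes the static function is visible from the same file.

/* Minimal sketch (assumed caller): three tag sets in preference order.
 * bson_count_keys() on the array is 3, so the first set scores 3, the
 * second 2, the empty catch-all set 1, and no match at all scores -1. */
bson_t *read_tags = BCON_NEW ("0", "{", "dc", BCON_UTF8 ("ny"),
                                        "rack", BCON_UTF8 ("1"), "}",
                              "1", "{", "dc", BCON_UTF8 ("ny"), "}",
                              "2", "{", "}");
bson_t *node_tags = BCON_NEW ("dc", BCON_UTF8 ("ny"));  /* the node's own tags */

/* The node lacks "rack", so set 0 fails and set 1 matches: score is 2. */
int score = _score_tags (read_tags, node_tags);

bson_destroy (read_tags);
bson_destroy (node_tags);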
int32_t
_mongoc_write_result_merge_arrays (uint32_t               offset,
                                   mongoc_write_result_t *result, /* IN */
                                   bson_t                *dest,   /* IN */
                                   bson_iter_t           *iter)   /* IN */
{
   const bson_value_t *value;
   bson_iter_t ar;
   bson_iter_t citer;
   int32_t idx;
   int32_t count = 0;
   int32_t aridx;
   bson_t child;
   const char *keyptr = NULL;
   char key[12];
   int len;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (dest);
   BSON_ASSERT (iter);
   BSON_ASSERT (BSON_ITER_HOLDS_ARRAY (iter));

   aridx = bson_count_keys (dest);

   if (bson_iter_recurse (iter, &ar)) {
      while (bson_iter_next (&ar)) {
         if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
             bson_iter_recurse (&ar, &citer)) {
            len = (int) bson_uint32_to_string (aridx++, &keyptr, key,
                                               sizeof key);
            bson_append_document_begin (dest, keyptr, len, &child);
            while (bson_iter_next (&citer)) {
               if (BSON_ITER_IS_KEY (&citer, "index")) {
                  idx = bson_iter_int32 (&citer) + offset;
                  BSON_APPEND_INT32 (&child, "index", idx);
               } else {
                  value = bson_iter_value (&citer);
                  BSON_APPEND_VALUE (&child, bson_iter_key (&citer), value);
               }
            }
            bson_append_document_end (dest, &child);
            count++;
         }
      }
   }

   RETURN (count);
}
void
mongoc_read_prefs_add_tag (mongoc_read_prefs_t *read_prefs,
                           const bson_t        *tag)
{
   bson_t empty = BSON_INITIALIZER;
   char str[16];
   int key;

   BSON_ASSERT (read_prefs);

   key = bson_count_keys (&read_prefs->tags);
   bson_snprintf (str, sizeof str, "%d", key);

   if (tag) {
      bson_append_document (&read_prefs->tags, str, -1, tag);
   } else {
      bson_append_document (&read_prefs->tags, str, -1, &empty);
   }
}
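This setter is normally driven through the public read-preferences API; a minimal usage sketch, assuming a secondary read mode and an illustrative "dc": "east" tag:

/* Minimal sketch (assumed caller): prefer secondaries tagged {"dc": "east"},
 * then fall back to any secondary (the empty tag document matches all). */
mongoc_read_prefs_t *prefs = mongoc_read_prefs_new (MONGOC_READ_SECONDARY);
bson_t *tag = BCON_NEW ("dc", BCON_UTF8 ("east"));

mongoc_read_prefs_add_tag (prefs, tag);   /* appended under array key "0" */
mongoc_read_prefs_add_tag (prefs, NULL);  /* empty set appended under key "1" */

bson_destroy (tag);
mongoc_read_prefs_destroy (prefs);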
/*!
 * \brief Get and convert columns from a result
 *
 * Get and convert columns from a result, fills the result structure
 * with data from the database.
 * \param _h database connection
 * \param _r database result set
 * \return 0 on success, negative on failure
 */
int db_mongodb_get_columns(const db1_con_t* _h, db1_res_t* _r)
{
	int col;
	db_mongodb_result_t *mgres;
	bson_iter_t riter;
	bson_iter_t citer;
	bson_t *cdoc;
	const char *colname;
	bson_type_t coltype;

	if ((!_h) || (!_r)) {
		LM_ERR("invalid parameter\n");
		return -1;
	}
	mgres = (db_mongodb_result_t*)RES_PTR(_r);
	if(!mgres->rdoc) {
		mgres->nrcols = 0;
		return 0;
	}
	if(mgres->nrcols==0 || mgres->colsdoc==NULL) {
		mgres->nrcols = (int)bson_count_keys(mgres->rdoc);
		if(mgres->nrcols==0) {
			LM_ERR("no keys in bson document\n");
			return -1;
		}
		cdoc = mgres->rdoc;
	} else {
		cdoc = mgres->colsdoc;
	}
	RES_COL_N(_r) = mgres->nrcols;
	if (!RES_COL_N(_r)) {
		LM_ERR("no columns returned from the query\n");
		return -2;
	} else {
		LM_DBG("%d columns returned from the query\n", RES_COL_N(_r));
	}

	if (db_allocate_columns(_r, RES_COL_N(_r)) != 0) {
		RES_COL_N(_r) = 0;
		LM_ERR("could not allocate columns\n");
		return -3;
	}

	if (!bson_iter_init (&citer, cdoc)) {
		LM_ERR("failed to initialize columns iterator\n");
		return -3;
	}
	if(mgres->colsdoc) {
		if (!bson_iter_init (&riter, mgres->rdoc)) {
			LM_ERR("failed to initialize result iterator\n");
			return -3;
		}
	}
	col = 0;
	while (bson_iter_next (&citer)) {
		if(col >= RES_COL_N(_r)) {
			LM_ERR("invalid number of columns (%d/%d)\n", col, RES_COL_N(_r));
			return -4;
		}

		colname = bson_iter_key (&citer);
		LM_DBG("Found a field[%d] named: %s\n", col, colname);
		if(mgres->colsdoc) {
			if(!bson_iter_find(&riter, colname)) {
				LM_ERR("field [%s] not found in result iterator\n", colname);
				return -4;
			}
			coltype = bson_iter_type(&riter);
		} else {
			coltype = bson_iter_type(&citer);
		}

		RES_NAMES(_r)[col] = (str*)pkg_malloc(sizeof(str));
		if (!RES_NAMES(_r)[col]) {
			LM_ERR("no private memory left\n");
			db_free_columns(_r);
			return -4;
		}
		LM_DBG("allocate %lu bytes for RES_NAMES[%d] at %p\n",
				(unsigned long)sizeof(str), col, RES_NAMES(_r)[col]);

		/* pointer linked here is part of the result structure */
		RES_NAMES(_r)[col]->s = (char*)colname;
		RES_NAMES(_r)[col]->len = strlen(colname);

		switch(coltype) {
			case BSON_TYPE_BOOL:
			case BSON_TYPE_INT32:
			case BSON_TYPE_TIMESTAMP:
				LM_DBG("use DB1_INT result type\n");
				RES_TYPES(_r)[col] = DB1_INT;
				break;

			case BSON_TYPE_INT64:
				LM_DBG("use DB1_BIGINT result type\n");
				RES_TYPES(_r)[col] = DB1_BIGINT;
				break;

			case BSON_TYPE_DOUBLE:
				LM_DBG("use DB1_DOUBLE result type\n");
				RES_TYPES(_r)[col] = DB1_DOUBLE;
				break;

			case BSON_TYPE_DATE_TIME:
				LM_DBG("use DB1_DATETIME result type\n");
				RES_TYPES(_r)[col] = DB1_DATETIME;
				break;

			case BSON_TYPE_BINARY:
				LM_DBG("use DB1_BLOB result type\n");
				RES_TYPES(_r)[col] = DB1_BLOB;
				break;

			case BSON_TYPE_UTF8:
				LM_DBG("use DB1_STRING result type\n");
				RES_TYPES(_r)[col] = DB1_STRING;
				break;

#if 0
			case BSON_TYPE_EOD:
			case BSON_TYPE_DOCUMENT:
			case BSON_TYPE_ARRAY:
			case BSON_TYPE_UNDEFINED:
			case BSON_TYPE_OID:
			case BSON_TYPE_NULL:
			case BSON_TYPE_REGEX:
			case BSON_TYPE_DBPOINTER:
			case BSON_TYPE_CODE:
			case BSON_TYPE_SYMBOL:
			case BSON_TYPE_CODEWSCOPE:
			case BSON_TYPE_MAXKEY:
			case BSON_TYPE_MINKEY:
#endif
			default:
				LM_INFO("unhandled data type column (%.*s) type id (%d), "
						"use DB1_STRING as default\n", RES_NAMES(_r)[col]->len,
						RES_NAMES(_r)[col]->s, coltype);
				RES_TYPES(_r)[col] = DB1_STRING;
				break;
		}
		LM_DBG("RES_NAMES(%p)[%d]=[%.*s] (%d)\n", RES_NAMES(_r)[col], col,
				RES_NAMES(_r)[col]->len, RES_NAMES(_r)[col]->s, coltype);
		col++;
	}
	return 0;
}
size_t
mongoc_server_description_filter_eligible (
   mongoc_server_description_t **descriptions,
   size_t                        description_len,
   const mongoc_read_prefs_t    *read_prefs)
{
   const bson_t *rp_tags;
   bson_iter_t rp_tagset_iter;
   bson_iter_t rp_iter;
   bson_iter_t sd_iter;
   uint32_t rp_len;
   uint32_t sd_len;
   const char *rp_val;
   const char *sd_val;
   bool *sd_matched = NULL;
   size_t found;
   size_t i;
   size_t rval = 0;

   if (!read_prefs) {
      /* NULL read_prefs is PRIMARY, no tags to filter by */
      return description_len;
   }

   rp_tags = mongoc_read_prefs_get_tags (read_prefs);

   if (bson_count_keys (rp_tags) == 0) {
      return description_len;
   }

   sd_matched = (bool *) bson_malloc0 (sizeof(bool) * description_len);

   bson_iter_init (&rp_tagset_iter, rp_tags);

   /* for each read preference tagset */
   while (bson_iter_next (&rp_tagset_iter)) {
      found = description_len;

      for (i = 0; i < description_len; i++) {
         sd_matched[i] = true;

         bson_iter_recurse (&rp_tagset_iter, &rp_iter);

         while (bson_iter_next (&rp_iter)) {
            /* TODO: can we have non-utf8 tags? */
            rp_val = bson_iter_utf8 (&rp_iter, &rp_len);

            if (bson_iter_init_find (&sd_iter, &descriptions[i]->tags,
                                     bson_iter_key (&rp_iter))) {
               /* If the server description has that key */
               sd_val = bson_iter_utf8 (&sd_iter, &sd_len);

               if (! (sd_len == rp_len && (0 == memcmp(rp_val, sd_val, rp_len)))) {
                  /* If the key value doesn't match, no match */
                  sd_matched[i] = false;
                  found--;
               }
            } else {
               /* If the server description doesn't have that key, no match */
               sd_matched[i] = false;
               found--;
               break;
            }
         }
      }

      if (found) {
         for (i = 0; i < description_len; i++) {
            if (! sd_matched[i]) {
               descriptions[i] = NULL;
            }
         }

         rval = found;
         goto CLEANUP;
      }
   }

   for (i = 0; i < description_len; i++) {
      if (! sd_matched[i]) {
         descriptions[i] = NULL;
      }
   }

CLEANUP:
   bson_free (sd_matched);

   return rval;
}