/* Locate the "create_table" entry matching @table_name inside the schema's
 * top-level "json" array and position *iter_col at the start of that table's
 * "columns" array. Returns true when found, false when no matching table
 * exists.
 *
 * NOTE(review): the `expr || DIE` chains treat any schema-shape violation as
 * fatal -- assumes the DIE macro aborts (or never yields false); confirm its
 * definition. */
bool
bson_find_create_table (bson_t *bson_schema,
                        const char *table_name,
                        bson_iter_t *iter_col)
{
   bson_iter_t iter_json, iter_ary, iter_sql, iter_table_prop;

   /* the schema must contain a top-level "json" array */
   bson_iter_init_find (&iter_json, bson_schema, "json") || DIE;
   BSON_ITER_HOLDS_ARRAY (&iter_json) || DIE;
   bson_iter_recurse (&iter_json, &iter_ary) || DIE;
   while (bson_iter_next (&iter_ary)) {
      /* each element is a document, possibly {"create_table": {...}} */
      (BSON_ITER_HOLDS_DOCUMENT (&iter_ary) || DIE);
      bson_iter_recurse (&iter_ary, &iter_sql) || DIE;
      /* once "create_table" is found, its shape is mandatory; only the
       * table-name comparison may make the chain fall through */
      if (bson_iter_find (&iter_sql, "create_table") &&
          (BSON_ITER_HOLDS_DOCUMENT (&iter_sql) || DIE) &&
          (bson_iter_recurse (&iter_sql, &iter_table_prop) || DIE) &&
          (bson_iter_find (&iter_table_prop, "table_name") || DIE) &&
          (BSON_ITER_HOLDS_UTF8 (&iter_table_prop) || DIE) &&
          (strcmp (bson_iter_utf8 (&iter_table_prop, NULL), table_name) == 0) &&
          (bson_iter_find (&iter_table_prop, "columns") || DIE) &&
          (BSON_ITER_HOLDS_ARRAY (&iter_table_prop) || DIE)) {
         bson_iter_recurse (&iter_table_prop, iter_col) || DIE;
         return true;
      }
   }
   return (false);
}
/*
 * Start iterating the reply to an "aggregate", "find", "getMore" etc. command:
 *
 *    {cursor: {id: 1234, ns: "db.collection", firstBatch: [...]}}
 *
 * Returns true iff a batch array was found and recursed into.
 */
bool
_mongoc_cursor_cursorid_start_batch (mongoc_cursor_t *cursor)
{
   mongoc_cursor_cursorid_t *cid =
      (mongoc_cursor_cursorid_t *) cursor->iface_data;
   bson_iter_t top;
   bson_iter_t field;

   BSON_ASSERT (cid);

   if (bson_iter_init_find (&top, &cid->array, "cursor") &&
       BSON_ITER_HOLDS_DOCUMENT (&top) && bson_iter_recurse (&top, &field)) {
      while (bson_iter_next (&field)) {
         const char *key = bson_iter_key (&field);

         if (0 == strcmp (key, "id")) {
            /* server-side cursor id; 0 means the cursor is exhausted */
            cursor->rpc.reply.cursor_id = bson_iter_as_int64 (&field);
         } else if (0 == strcmp (key, "ns")) {
            uint32_t nslen;
            const char *ns = bson_iter_utf8 (&field, &nslen);

            _mongoc_set_cursor_ns (cursor, ns, nslen);
         } else if (0 == strcmp (key, "firstBatch") ||
                    0 == strcmp (key, "nextBatch")) {
            if (BSON_ITER_HOLDS_ARRAY (&field) &&
                bson_iter_recurse (&field, &cid->batch_iter)) {
               cid->in_batch = true;
            }
         }
      }
   }

   return cid->in_batch;
}
bool _mongoc_convert_array (mongoc_client_t *client, const bson_iter_t *iter, bson_t *doc, bson_error_t *error) { uint32_t len; const uint8_t *data; bson_t value; if (!BSON_ITER_HOLDS_ARRAY (iter)) { CONVERSION_ERR ("Invalid field \"%s\" in opts, should contain array," " not %s", bson_iter_key (iter), _mongoc_bson_type_to_str (bson_iter_type (iter))); } bson_iter_array (iter, &len, &data); if (!bson_init_static (&value, data, len)) { BSON_ERR ("Corrupt BSON in field \"%s\" in opts", bson_iter_key (iter)); } bson_destroy (doc); bson_copy_to (&value, doc); return true; }
/* {{{ proto WriteError[] WriteResult::getWriteErrors()
   Returns any write errors that occurred */
PHP_METHOD(WriteResult, getWriteErrors)
{
	bson_iter_t iter, child;
	php_phongo_writeresult_t *intern;

	SUPPRESS_UNUSED_WARNING(return_value_ptr)
	SUPPRESS_UNUSED_WARNING(return_value_used)

	intern = Z_WRITERESULT_OBJ_P(getThis());

	/* method takes no arguments */
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	array_init(return_value);

	/* reply shape: { writeErrors: [ { ... }, ... ] } */
	if (bson_iter_init_find(&iter, intern->reply, "writeErrors") && BSON_ITER_HOLDS_ARRAY(&iter) && bson_iter_recurse(&iter, &child)) {
		while (bson_iter_next(&child)) {
			bson_t cbson;
			uint32_t len;
			const uint8_t *data;
#if PHP_VERSION_ID >= 70000
			zval writeerror;
#else
			zval *writeerror = NULL;
#endif

			/* skip non-document elements silently */
			if (!BSON_ITER_HOLDS_DOCUMENT(&child)) {
				continue;
			}

			bson_iter_document(&child, &len, &data);

			if (!bson_init_static(&cbson, data, len)) {
				continue;
			}

			/* wrap the raw error document in a WriteError object and
			 * append it; on init failure drop the object and move on */
#if PHP_VERSION_ID >= 70000
			object_init_ex(&writeerror, php_phongo_writeerror_ce);

			if (!phongo_writeerror_init(&writeerror, &cbson TSRMLS_CC)) {
				zval_ptr_dtor(&writeerror);
				continue;
			}

			add_next_index_zval(return_value, &writeerror);
#else
			MAKE_STD_ZVAL(writeerror);
			object_init_ex(writeerror, php_phongo_writeerror_ce);

			if (!phongo_writeerror_init(writeerror, &cbson TSRMLS_CC)) {
				zval_ptr_dtor(&writeerror);
				continue;
			}

			add_next_index_zval(return_value, writeerror);
#endif
		}
	}
}
/* Fetch the first reply document (once) and position arr->iter at the
 * start of the array stored under arr->field_name. Returns false when the
 * fetch fails or the field is missing / not an array; subsequent calls
 * after a successful prime are no-ops returning true. */
bool
_mongoc_cursor_array_prime (mongoc_cursor_t *cursor)
{
   mongoc_cursor_array_t *arr = (mongoc_cursor_array_t *) cursor->iface_data;
   bson_iter_t field;
   bool ok = true;

   ENTRY;

   if (!arr->has_array) {
      arr->has_array = true;
      ok = _mongoc_cursor_next (cursor, &arr->result) &&
           bson_iter_init_find (&field, arr->result, arr->field_name) &&
           BSON_ITER_HOLDS_ARRAY (&field) &&
           bson_iter_recurse (&field, &arr->iter);
   }

   return ok;
}
/* {{{ proto array WriteResult::getUpsertedIds()
   Returns the identifiers generated by the server for upsert operations. */
PHP_METHOD(WriteResult, getUpsertedIds)
{
	bson_iter_t iter, child;
	php_phongo_writeresult_t *intern;

	SUPPRESS_UNUSED_WARNING(return_value_ptr)
	SUPPRESS_UNUSED_WARNING(return_value_used)

	intern = Z_WRITERESULT_OBJ_P(getThis());

	/* method takes no arguments */
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	array_init(return_value);

	/* reply shape: { upserted: [ { index: <n>, _id: <id> }, ... ] } */
	if (bson_iter_init_find(&iter, intern->reply, "upserted") && BSON_ITER_HOLDS_ARRAY(&iter) && bson_iter_recurse(&iter, &child)) {
		while (bson_iter_next(&child)) {
			int32_t index;
			bson_iter_t outer;

			/* each entry must be a document with an int32 "index" */
			if (!BSON_ITER_HOLDS_DOCUMENT(&child) || !bson_iter_recurse(&child, &outer)) {
				continue;
			}
			if (!bson_iter_find(&outer, "index") || !BSON_ITER_HOLDS_INT32(&outer)) {
				continue;
			}

			/* the operation's bulk index becomes the PHP array key */
			index = bson_iter_int32(&outer);

			if (!bson_iter_find(&outer, "_id")) {
				continue;
			}

			/* _id may be an ObjectId, int32, or int64; other types are
			 * silently skipped */
			if (BSON_ITER_HOLDS_OID(&outer)) {
#if PHP_VERSION_ID >= 70000
				zval zid;

				php_phongo_objectid_new_from_oid(&zid, bson_iter_oid(&outer) TSRMLS_CC);
				add_index_zval(return_value, index, &zid);
#else
				zval *zid = NULL;

				MAKE_STD_ZVAL(zid);
				php_phongo_objectid_new_from_oid(zid, bson_iter_oid(&outer) TSRMLS_CC);
				add_index_zval(return_value, index, zid);
#endif
			} else if (BSON_ITER_HOLDS_INT32(&outer)) {
				int32_t val = bson_iter_int32(&outer);

				add_index_long(return_value, index, val);
			} else if (BSON_ITER_HOLDS_INT64(&outer)) {
				int64_t val = bson_iter_int64(&outer);

				/* project macro; presumably abstracts 64-bit handling
				 * across PHP builds -- see its definition */
				ADD_INDEX_INT64(return_value, index, val);
			}
		}
	}
}
/*---------------------------------------------------------------------------
 *
 * _change_stream_init --
 *
 *       Called after @stream has the collection name, database name, read
 *       preferences, and read concern set. Creates the change streams
 *       cursor.
 *
 *--------------------------------------------------------------------------
 */
void
_change_stream_init (mongoc_change_stream_t *stream,
                     const bson_t *pipeline,
                     const bson_t *opts)
{
   BSON_ASSERT (pipeline);
   /* -1 means "unset" for both tunables until opts are parsed */
   stream->max_await_time_ms = -1;
   stream->batch_size = -1;
   bson_init (&stream->pipeline_to_append);
   bson_init (&stream->resume_token);
   bson_init (&stream->err_doc);

   /* on parse failure stream->err already carries the error; stop here */
   if (!_mongoc_change_stream_opts_parse (
          stream->client, opts, &stream->opts, &stream->err)) {
      return;
   }

   stream->full_document =
      BCON_NEW ("fullDocument", stream->opts.fullDocument);

   /* a user-supplied resume token becomes the "resumeAfter" option */
   if (!bson_empty (&(stream->opts.resumeAfter))) {
      bson_append_document (&stream->resume_token,
                            "resumeAfter",
                            11,
                            &(stream->opts.resumeAfter));
   }

   _mongoc_timestamp_set (&stream->operation_time,
                          &(stream->opts.startAtOperationTime));

   stream->batch_size = stream->opts.batchSize;
   stream->max_await_time_ms = stream->opts.maxAwaitTimeMS;

   /* Accept two forms of user pipeline:
    * 1. A document like: { "pipeline": [...] }
    * 2. An array-like document: { "0": {}, "1": {}, ... }
    * If the passed pipeline is invalid, we pass it along and let the server
    * error instead. */
   if (!bson_empty (pipeline)) {
      bson_iter_t iter;
      if (bson_iter_init_find (&iter, pipeline, "pipeline") &&
          BSON_ITER_HOLDS_ARRAY (&iter)) {
         if (!BSON_APPEND_VALUE (&stream->pipeline_to_append,
                                 "pipeline",
                                 bson_iter_value (&iter))) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      } else {
         if (!BSON_APPEND_ARRAY (
                &stream->pipeline_to_append, "pipeline", pipeline)) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      }
   }

   /* only create the server-side cursor if no error occurred above */
   if (stream->err.code == 0) {
      (void) _make_cursor (stream);
   }
}
/* Construct the aggregate command in cmd:
 * { aggregate: collname, pipeline: [], cursor: { batchSize: x } } */
static void
_make_command (mongoc_change_stream_t *stream, bson_t *command)
{
   bson_iter_t iter;
   bson_t change_stream_stage; /* { $changeStream: <change_stream_doc> } */
   bson_t change_stream_doc;
   bson_t pipeline;
   bson_t cursor_doc;

   bson_init (command);
   bson_append_utf8 (command,
                     "aggregate",
                     9,
                     stream->coll->collection,
                     stream->coll->collectionlen);
   bson_append_array_begin (command, "pipeline", 8, &pipeline);

   /* Append the $changeStream stage */
   bson_append_document_begin (&pipeline, "0", 1, &change_stream_stage);
   bson_append_document_begin (
      &change_stream_stage, "$changeStream", 13, &change_stream_doc);
   /* fullDocument option, plus resumeAfter when a resume token exists */
   bson_concat (&change_stream_doc, &stream->full_document);
   if (!bson_empty (&stream->resume_token)) {
      bson_concat (&change_stream_doc, &stream->resume_token);
   }
   bson_append_document_end (&change_stream_stage, &change_stream_doc);
   bson_append_document_end (&pipeline, &change_stream_stage);

   /* Append user pipeline if it exists */
   if (bson_iter_init_find (&iter, &stream->pipeline_to_append, "pipeline") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      bson_iter_t child_iter;
      uint32_t key_int = 1; /* stage "0" is $changeStream; user stages follow */
      char buf[16];
      const char *key_str;

      BSON_ASSERT (bson_iter_recurse (&iter, &child_iter));
      while (bson_iter_next (&child_iter)) {
         /* The user pipeline may consist of invalid stages or non-documents.
          * Append anyway, and rely on the server error. */
         size_t keyLen =
            bson_uint32_to_string (key_int, &key_str, buf, sizeof (buf));
         bson_append_value (
            &pipeline, key_str, (int) keyLen, bson_iter_value (&child_iter));
         ++key_int;
      }
   }

   bson_append_array_end (command, &pipeline);

   /* Add batch size if needed */
   bson_append_document_begin (command, "cursor", 6, &cursor_doc);
   if (stream->batch_size > 0) {
      bson_append_int32 (&cursor_doc, "batchSize", 9, stream->batch_size);
   }
   bson_append_document_end (command, &cursor_doc);
}
/* Block until every member of the replica set reports a healthy state
 * (PRIMARY, SECONDARY, or ARBITER). Polls replSetGetStatus once per
 * second; any fetch failure, missing "members" array, or unhealthy node
 * restarts the wait. */
void
ha_replica_set_wait_for_healthy (ha_replica_set_t *replica_set)
{
   bson_iter_t iter;
   bson_iter_t ar;
   bson_iter_t member;
   const char *stateStr;
   bson_t status;

again:
   sleep(1);

   if (!ha_replica_set_get_status(replica_set, &status)) {
      MONGOC_INFO("Failed to get replicaSet status. "
                  "Sleeping 1 second.");
      goto again;
   }

#if 0
   /* debugging aid: dump the raw replSetGetStatus reply */
   {
      char *str;

      str = bson_as_json(&status, NULL);
      printf("%s\n", str);
      bson_free(str);
   }
#endif

   /* reply shape: { members: [ { stateStr: "PRIMARY", ... }, ... ] } */
   if (!bson_iter_init_find(&iter, &status, "members") ||
       !BSON_ITER_HOLDS_ARRAY(&iter) ||
       !bson_iter_recurse(&iter, &ar)) {
      bson_destroy(&status);
      MONGOC_INFO("ReplicaSet has not yet come online. "
                  "Sleeping 1 second.");
      goto again;
   }

   while (bson_iter_next(&ar)) {
      if (BSON_ITER_HOLDS_DOCUMENT(&ar) &&
          bson_iter_recurse(&ar, &member) &&
          bson_iter_find(&member, "stateStr") &&
          (stateStr = bson_iter_utf8(&member, NULL))) {
         /* any state other than PRIMARY/SECONDARY/ARBITER is unhealthy */
         if (!!strcmp(stateStr, "PRIMARY") &&
             !!strcmp(stateStr, "SECONDARY") &&
             !!strcmp(stateStr, "ARBITER")) {
            bson_destroy(&status);
            MONGOC_INFO("Found unhealthy node. Sleeping 1 second.");
            goto again;
         }
      }
   }

   bson_destroy(&status);
}
/* Append each document of the array held by @iter to @dest, rewriting its
 * "index" field by adding @offset (translating a batch-relative index into
 * a whole-bulk-operation index). Returns the number of documents merged. */
int32_t
_mongoc_write_result_merge_arrays (uint32_t offset,
                                   mongoc_write_result_t *result, /* IN */
                                   bson_t *dest,                  /* IN */
                                   bson_iter_t *iter)             /* IN */
{
   const bson_value_t *value;
   bson_iter_t ar;
   bson_iter_t citer;
   int32_t idx;
   int32_t count = 0;
   int32_t aridx;
   bson_t child;
   const char *keyptr = NULL;
   char key[12];
   int len;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (dest);
   BSON_ASSERT (iter);
   BSON_ASSERT (BSON_ITER_HOLDS_ARRAY (iter));

   /* continue array numbering after dest's existing elements */
   aridx = bson_count_keys (dest);

   if (bson_iter_recurse (iter, &ar)) {
      while (bson_iter_next (&ar)) {
         /* non-document elements are skipped silently */
         if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
             bson_iter_recurse (&ar, &citer)) {
            len =
               (int) bson_uint32_to_string (aridx++, &keyptr, key, sizeof key);
            bson_append_document_begin (dest, keyptr, len, &child);
            while (bson_iter_next (&citer)) {
               if (BSON_ITER_IS_KEY (&citer, "index")) {
                  /* rewrite the batch index as a whole-operation index */
                  idx = bson_iter_int32 (&citer) + offset;
                  BSON_APPEND_INT32 (&child, "index", idx);
               } else {
                  /* all other fields are copied verbatim */
                  value = bson_iter_value (&citer);
                  BSON_APPEND_VALUE (&child, bson_iter_key (&citer), value);
               }
            }
            bson_append_document_end (dest, &child);
            count++;
         }
      }
   }

   RETURN (count);
}
/* Map the BSON value currently under @iter onto the corresponding R SEXP.
 * int64 is widened to double (R has no native 64-bit integer) and an
 * ObjectId becomes its 24-character hex string. Unsupported types abort
 * via stop(). */
SEXP ConvertValue(bson_iter_t* iter){
  switch(bson_iter_type(iter)){
  case BSON_TYPE_INT32:
    return ScalarInteger(bson_iter_int32(iter));
  case BSON_TYPE_NULL:
    return R_NilValue;
  case BSON_TYPE_BOOL:
    return ScalarLogical(bson_iter_bool(iter));
  case BSON_TYPE_DOUBLE:
    return ScalarReal(bson_iter_double(iter));
  case BSON_TYPE_INT64:
    /* widen to double; may lose precision above 2^53 */
    return ScalarReal((double) bson_iter_int64(iter));
  case BSON_TYPE_UTF8:
    return mkStringUTF8(bson_iter_utf8(iter, NULL));
  case BSON_TYPE_CODE:
    return mkStringUTF8(bson_iter_code(iter, NULL));
  case BSON_TYPE_BINARY:
    return ConvertBinary(iter);
  case BSON_TYPE_DATE_TIME:
    return ConvertDate(iter);
  case BSON_TYPE_OID: {
    char str[25]; /* 24 hex chars + NUL */
    bson_oid_to_string(bson_iter_oid(iter), str);
    return mkString(str);
  }
  case BSON_TYPE_ARRAY: {
    /* two independent iterators over the same array for the converter */
    bson_iter_t child1;
    bson_iter_t child2;
    bson_iter_recurse (iter, &child1);
    bson_iter_recurse (iter, &child2);
    return ConvertArray(&child1, &child2);
  }
  case BSON_TYPE_DOCUMENT: {
    bson_iter_t child1;
    bson_iter_t child2;
    bson_iter_recurse (iter, &child1);
    bson_iter_recurse (iter, &child2);
    return ConvertObject(&child1, &child2);
  }
  default:
    stop("Unimplemented BSON type %d\n", bson_iter_type(iter));
  }
}
/* Implement the $in operator: succeeds when the value under @iter equals
 * any element of the operand array held by @compare. A missing or
 * non-array operand matches nothing. */
static bool
_mongoc_matcher_op_in_match (mongoc_matcher_op_compare_t *compare, /* IN */
                             bson_iter_t *iter)                    /* IN */
{
   mongoc_matcher_op_compare_t eq_op;

   /* treat each array element as an equality comparison on the same path */
   eq_op.base.opcode = MONGOC_MATCHER_OPCODE_EQ;
   eq_op.path = compare->path;

   if (!BSON_ITER_HOLDS_ARRAY (&compare->iter) ||
       !bson_iter_recurse (&compare->iter, &eq_op.iter)) {
      return false;
   }

   while (bson_iter_next (&eq_op.iter)) {
      if (_mongoc_matcher_op_eq_match (&eq_op, iter)) {
         return true;
      }
   }

   return false;
}
/* Map the BSON value currently under @iter onto the corresponding R SEXP.
 * int64 is widened to double (R has no native 64-bit integer); an ObjectId
 * is returned as a 12-byte raw vector. Unsupported types abort via stop(). */
SEXP ConvertValue(bson_iter_t* iter){
  if(BSON_ITER_HOLDS_INT32(iter)){
    return ScalarInteger(bson_iter_int32(iter));
  } else if(BSON_ITER_HOLDS_NULL(iter)){
    return R_NilValue;
  } else if(BSON_ITER_HOLDS_BOOL(iter)){
    return ScalarLogical(bson_iter_bool(iter));
  } else if(BSON_ITER_HOLDS_DOUBLE(iter)){
    return ScalarReal(bson_iter_double(iter));
  } else if(BSON_ITER_HOLDS_INT64(iter)){
    /* widen to double; may lose precision above 2^53 */
    return ScalarReal((double) bson_iter_int64(iter));
  } else if(BSON_ITER_HOLDS_UTF8(iter)){
    return mkStringUTF8(bson_iter_utf8(iter, NULL));
  } else if(BSON_ITER_HOLDS_CODE(iter)){
    return mkStringUTF8(bson_iter_code(iter, NULL));
  } else if(BSON_ITER_HOLDS_BINARY(iter)){
    return ConvertBinary(iter);
  } else if(BSON_ITER_HOLDS_DATE_TIME(iter)){
    return ConvertDate(iter);
  } else if(BSON_ITER_HOLDS_OID(iter)){
    //not sure if this casting works
    /* NOTE(review): assumes bson_oid_t is exactly 12 contiguous bytes of
     * oid data -- confirm against libbson's struct layout */
    return mkRaw((unsigned char *) bson_iter_oid(iter), 12);
  } else if(BSON_ITER_HOLDS_ARRAY(iter)){
    /* two independent iterators over the same array for the converter */
    bson_iter_t child1;
    bson_iter_t child2;
    bson_iter_recurse (iter, &child1);
    bson_iter_recurse (iter, &child2);
    return ConvertArray(&child1, &child2);
  } else if(BSON_ITER_HOLDS_DOCUMENT(iter)){
    bson_iter_t child1;
    bson_iter_t child2;
    bson_iter_recurse (iter, &child1);
    bson_iter_recurse (iter, &child2);
    return ConvertObject(&child1, &child2);
  } else {
    stop("Unimplemented BSON type %d\n", bson_iter_type(iter));
  }
}
/* Advance to the next element of the "result" array in the command reply,
 * lazily fetching the reply on the first call. On success, points *bson
 * at a static view of the current element document; on exhaustion or
 * error, *bson is NULL and false is returned. */
bool
_mongoc_cursor_array_next (mongoc_cursor_t *cursor, const bson_t **bson)
{
   mongoc_cursor_array_t *arr = cursor->iface_data;
   bool advanced;

   ENTRY;

   *bson = NULL;

   if (arr->has_array) {
      advanced = bson_iter_next (&arr->iter);
   } else {
      bson_iter_t reply_iter;

      arr->has_array = true;
      advanced = _mongoc_cursor_next (cursor, &arr->result) &&
                 bson_iter_init_find (&reply_iter, arr->result, "result") &&
                 BSON_ITER_HOLDS_ARRAY (&reply_iter) &&
                 bson_iter_recurse (&reply_iter, &arr->iter) &&
                 bson_iter_next (&arr->iter);
   }

   if (advanced) {
      bson_iter_document (&arr->iter, &arr->document_len, &arr->document);
      bson_init_static (&arr->bson, arr->document, arr->document_len);
      *bson = &arr->bson;
   }

   RETURN (advanced);
}
/* Run the cursor's command, read its buffered reply, and position
 * arr->iter at the start of the array stored under arr->field_name.
 * Returns true iff the array was found and recursed into. */
bool
_mongoc_cursor_array_prime (mongoc_cursor_t *cursor)
{
   mongoc_cursor_array_t *arr = (mongoc_cursor_array_t *) cursor->iface_data;
   const bson_t *reply;
   bson_iter_t field;

   ENTRY;

   BSON_ASSERT (arr);

   if (_mongoc_cursor_run_command (cursor, &cursor->query) &&
       _mongoc_read_from_buffer (cursor, &reply) &&
       bson_iter_init_find (&field, reply, arr->field_name) &&
       BSON_ITER_HOLDS_ARRAY (&field) &&
       bson_iter_recurse (&field, &arr->iter)) {
      arr->has_array = true;
   }

   return arr->has_array;
}
/* Unit test: recursing into an appended array yields exactly the element
 * keys "0", "1", "2" and then exhausts. */
static void
test_bson_iter_recurse (void)
{
   bson_t parent;
   bson_t array_doc;
   bson_iter_t iter;
   bson_iter_t sub;

   bson_init (&parent);
   bson_init (&array_doc);

   /* build the array [0, 1, 2] under "key" */
   assert (bson_append_int32 (&array_doc, "0", 1, 0));
   assert (bson_append_int32 (&array_doc, "1", 1, 1));
   assert (bson_append_int32 (&array_doc, "2", 1, 2));
   assert (bson_append_array (&parent, "key", -1, &array_doc));

   /* recurse into "key" and walk every element in order */
   assert (bson_iter_init_find (&iter, &parent, "key"));
   assert (BSON_ITER_HOLDS_ARRAY (&iter));
   assert (bson_iter_recurse (&iter, &sub));
   assert (bson_iter_find (&sub, "0"));
   assert (bson_iter_find (&sub, "1"));
   assert (bson_iter_find (&sub, "2"));
   assert (!bson_iter_next (&sub));

   bson_destroy (&parent);
   bson_destroy (&array_doc);
}
/* Scan the ismaster response's "saslSupportedMechs" array and record which
 * SCRAM mechanisms the server advertises. The output struct is zeroed
 * first, so a missing or malformed field leaves both flags false. */
void
_mongoc_handshake_parse_sasl_supported_mechs (
   const bson_t *ismaster,
   mongoc_handshake_sasl_supported_mechs_t *sasl_supported_mechs)
{
   bson_iter_t field;
   bson_iter_t mechs;

   memset (sasl_supported_mechs, 0, sizeof (*sasl_supported_mechs));

   if (!bson_iter_init_find (&field, ismaster, "saslSupportedMechs") ||
       !BSON_ITER_HOLDS_ARRAY (&field) ||
       !bson_iter_recurse (&field, &mechs)) {
      return;
   }

   while (bson_iter_next (&mechs)) {
      const char *mechanism_name;

      if (!BSON_ITER_HOLDS_UTF8 (&mechs)) {
         continue;
      }

      mechanism_name = bson_iter_utf8 (&mechs, NULL);
      if (0 == strcmp (mechanism_name, "SCRAM-SHA-256")) {
         sasl_supported_mechs->scram_sha_256 = true;
      } else if (0 == strcmp (mechanism_name, "SCRAM-SHA-1")) {
         sasl_supported_mechs->scram_sha_1 = true;
      }
   }
}
/* Integration test: create capped / autoIndexId / plain collections, then
 * verify mongoc_database_get_collection_info() honors a name filter by
 * returning exactly the one filtered collection. */
static void
test_get_collection_info (void)
{
   mongoc_database_t *database;
   mongoc_collection_t *collection;
   mongoc_client_t *client;
   bson_error_t error = { 0 };
   bson_iter_t iter;
   bson_iter_t col_array;
   bson_iter_t col_iter;
   bson_t capped_options = BSON_INITIALIZER;
   bson_t autoindexid_options = BSON_INITIALIZER;
   bson_t noopts_options = BSON_INITIALIZER;
   bson_t name_filter = BSON_INITIALIZER;
   int r;
   int num_infos = 0;
   bson_t *infos = NULL;
   const char *name;
   char *dbname;
   char *capped_name;
   char *autoindexid_name;
   char *noopts_name;

   client = mongoc_client_new (gTestUri);
   assert (client);

   /* generated names keep parallel test runs from colliding */
   dbname = gen_collection_name ("dbtest");
   database = mongoc_client_get_database (client, dbname);
   assert (database);
   bson_free (dbname);

   capped_name = gen_collection_name ("capped");
   BSON_APPEND_BOOL (&capped_options, "capped", true);
   BSON_APPEND_INT32 (&capped_options, "size", 10000000);
   BSON_APPEND_INT32 (&capped_options, "max", 1024);

   autoindexid_name = gen_collection_name ("autoindexid");
   BSON_APPEND_BOOL (&autoindexid_options, "autoIndexId", false);

   noopts_name = gen_collection_name ("noopts");

   collection = mongoc_database_create_collection (database, capped_name,
                                                   &capped_options, &error);
   assert (collection);
   mongoc_collection_destroy (collection);

   collection = mongoc_database_create_collection (database, autoindexid_name,
                                                   &autoindexid_options,
                                                   &error);
   assert (collection);
   mongoc_collection_destroy (collection);

   collection = mongoc_database_create_collection (database, noopts_name,
                                                   &noopts_options, &error);
   assert (collection);
   mongoc_collection_destroy (collection);

   /* first we filter on collection name. */
   BSON_APPEND_UTF8 (&name_filter, "name", noopts_name);

   /* We only test with filters since get_collection_names will
    * test w/o filters for us. */

   /* Filter on an exact match of name */
   infos = mongoc_database_get_collection_info (database, &name_filter,
                                                &error);
   assert (infos);
   assert (!error.domain);
   assert (!error.code);

   /* reply shape: { collections: [ { name: "...", ... }, ... ] } */
   if (bson_iter_init_find (&iter, infos, "collections") &&
       BSON_ITER_HOLDS_ARRAY (&iter) &&
       bson_iter_recurse (&iter, &col_array)) {
      while (bson_iter_next (&col_array)) {
         if (BSON_ITER_HOLDS_DOCUMENT (&col_array) &&
             bson_iter_recurse (&col_array, &col_iter) &&
             bson_iter_find (&col_iter, "name") &&
             BSON_ITER_HOLDS_UTF8 (&col_iter) &&
             (name = bson_iter_utf8 (&col_iter, NULL))) {
            ++num_infos;
            /* only the filtered collection may be reported */
            assert (0 == strcmp (name, noopts_name));
         } else {
            assert (false);
         }
      }
   }

   assert (1 == num_infos);
   num_infos = 0;
   bson_destroy (infos);
   infos = NULL;

   r = mongoc_database_drop (database, &error);
   assert (r);
   assert (!error.domain);
   assert (!error.code);

   bson_free (capped_name);
   bson_free (noopts_name);
   bson_free (autoindexid_name);
   mongoc_database_destroy (database);
   mongoc_client_destroy (client);
}
/* List origins (and optionally their realms) from the MongoDB "realm"
 * collection, sorted by realm name. @realm may be NULL or empty to mean
 * "all realms". When @origins is NULL the realm/origin pairs are printed
 * to stdout instead of collected. Returns 0 on success, -1 on failure. */
static int mongo_list_origins(u08bits *realm, secrets_list_t *origins, secrets_list_t *realms) {
	mongoc_collection_t * collection = mongo_get_collection("realm");

	if(!collection)
		return -1;

	/* NULL realm means "all realms": substitute an empty string */
	u08bits realm0[STUN_MAX_REALM_SIZE+1] = "\0";
	if(!realm) realm=realm0;

	/* query: { $orderby: { realm: 1 }, $query: { [realm: <realm>] } } */
	bson_t query, child;
	bson_init(&query);
	bson_append_document_begin(&query, "$orderby", -1, &child);
	BSON_APPEND_INT32(&child, "realm", 1);
	bson_append_document_end(&query, &child);
	bson_append_document_begin(&query, "$query", -1, &child);
	if (realm && realm[0]) {
		BSON_APPEND_UTF8(&child, "realm", (const char *)realm);
	}
	bson_append_document_end(&query, &child);

	/* project only the fields we read */
	bson_t fields;
	bson_init(&fields);
	BSON_APPEND_INT32(&fields, "origin", 1);
	BSON_APPEND_INT32(&fields, "realm", 1);

	mongoc_cursor_t * cursor;
	cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, &query, &fields, NULL);

	int ret = -1;

	if (!cursor) {
		TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Error querying MongoDB collection 'realm'\n");
	} else {
		const bson_t * item;
		uint32_t length;
		bson_iter_t iter;

		while (mongoc_cursor_next(cursor, &item)) {
			/* each document: { realm: "...", origin: [ "...", ... ] } */
			if (bson_iter_init(&iter, item) && bson_iter_find(&iter, "realm") && BSON_ITER_HOLDS_UTF8(&iter)) {
				const char * _realm = bson_iter_utf8(&iter, &length);
				if (bson_iter_init(&iter, item) && bson_iter_find(&iter, "origin") && BSON_ITER_HOLDS_ARRAY(&iter)) {
					const uint8_t *docbuf = NULL;
					uint32_t doclen = 0;
					bson_t origin_array;
					bson_iter_t origin_iter;
					bson_iter_array(&iter, &doclen, &docbuf);
					bson_init_static(&origin_array, docbuf, doclen);
					if (bson_iter_init(&origin_iter, &origin_array)) {
						while(bson_iter_next(&origin_iter)) {
							if (BSON_ITER_HOLDS_UTF8(&origin_iter)) {
								const char * _origin = bson_iter_utf8(&origin_iter, &length);
								if(origins) {
									add_to_secrets_list(origins,_origin);
									/* keep realms index-aligned with origins */
									if(realms) {
										add_to_secrets_list(realms,_realm);
									}
								} else {
									printf("%s ==>> %s\n", _realm, _origin);
								}
							}
						}
					}
				}
			}
		}
		mongoc_cursor_destroy(cursor);
		ret = 0;
	}

	mongoc_collection_destroy(collection);
	bson_destroy(&query);
	bson_destroy(&fields);
	return ret;
}
/* Read the "<kind>_peer_ip" arrays for every realm from the MongoDB "realm"
 * collection and add each range to @list, tagged with its realm name.
 *
 * @kind  prefix for the field name ("allowed"/"denied" style).
 * @list  destination range list; one entry is added per array element.
 *
 * Returns 0 on success (or when the collection is unavailable),
 * -1 when the find query itself fails. */
static int mongo_read_realms_ip_lists(const char *kind, ip_range_list_t * list) {
	int ret = 0;
	char field_name[129];

	/* bounded formatting: the original unbounded sprintf() could overflow
	 * field_name if kind were ever longer than expected */
	snprintf(field_name, sizeof(field_name), "%s_peer_ip", kind);

	mongoc_collection_t * collection = mongo_get_collection("realm");
	if (!collection)
		return ret;

	/* empty query: scan all realm documents */
	bson_t query;
	bson_init(&query);

	/* project only the realm name and the requested ip list */
	bson_t fields;
	bson_init(&fields);
	BSON_APPEND_INT32(&fields, "realm", 1);
	BSON_APPEND_INT32(&fields, field_name, 1);

	mongoc_cursor_t * cursor;
	cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, &query, &fields, NULL);

	if (!cursor) {
		TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Error querying MongoDB collection 'realm'\n");
		ret = -1;
	} else {
		const bson_t * item;
		uint32_t length;
		bson_iter_t iter;
		char realm[513];

		while (mongoc_cursor_next(cursor, &item)) {
			/* each document: { realm: "...", <kind>_peer_ip: [ "...", ... ] } */
			if (bson_iter_init(&iter, item) && bson_iter_find(&iter, "realm") && BSON_ITER_HOLDS_UTF8(&iter)) {
				STRCPY(realm, bson_iter_utf8(&iter, &length));
				if (bson_iter_init(&iter, item) && bson_iter_find(&iter, field_name) && BSON_ITER_HOLDS_ARRAY(&iter)) {
					const uint8_t *docbuf = NULL;
					uint32_t doclen = 0;
					bson_t ip_range_array;
					bson_iter_t ip_range_iter;

					bson_iter_array(&iter, &doclen, &docbuf);
					bson_init_static(&ip_range_array, docbuf, doclen);
					if (bson_iter_init(&ip_range_iter, &ip_range_array)) {
						while (bson_iter_next(&ip_range_iter)) {
							if (BSON_ITER_HOLDS_UTF8(&ip_range_iter)) {
								const char* ip_range = bson_iter_utf8(&ip_range_iter, &length);
								add_ip_list_range(ip_range, realm, list);
							}
						}
					}
				}
			}
		}
		mongoc_cursor_destroy(cursor);
	}

	mongoc_collection_destroy(collection);
	bson_destroy(&query);
	bson_destroy(&fields);
	return ret;
}
/* construct the aggregate command in cmd. looks like one of the following:
 * for a collection change stream:
 *   { aggregate: collname, pipeline: [], cursor: { batchSize: x } }
 * for a database change stream:
 *   { aggregate: 1, pipeline: [], cursor: { batchSize: x } }
 * for a client change stream:
 *   { aggregate: 1, pipeline: [{$changeStream: {allChangesForCluster: true}}],
 *     cursor: { batchSize: x } }
 */
static void
_make_command (mongoc_change_stream_t *stream, bson_t *command)
{
   bson_iter_t iter;
   bson_t change_stream_stage; /* { $changeStream: <change_stream_doc> } */
   bson_t change_stream_doc;
   bson_t pipeline;
   bson_t cursor_doc;

   bson_init (command);

   /* "aggregate" takes the collection name for a collection stream, the
    * literal 1 for database and client streams */
   if (stream->change_stream_type == MONGOC_CHANGE_STREAM_COLLECTION) {
      bson_append_utf8 (
         command, "aggregate", 9, stream->coll, (int) strlen (stream->coll));
   } else {
      bson_append_int32 (command, "aggregate", 9, 1);
   }

   bson_append_array_begin (command, "pipeline", 8, &pipeline);

   /* append the $changeStream stage. */
   bson_append_document_begin (&pipeline, "0", 1, &change_stream_stage);
   bson_append_document_begin (
      &change_stream_stage, "$changeStream", 13, &change_stream_doc);
   bson_concat (&change_stream_doc, stream->full_document);
   if (!bson_empty (&stream->resume_token)) {
      bson_concat (&change_stream_doc, &stream->resume_token);
   }

   /* Change streams spec: "startAtOperationTime and resumeAfter are mutually
    * exclusive; if both startAtOperationTime and resumeAfter are set, the
    * server will return an error. Drivers MUST NOT throw a custom error, and
    * MUST defer to the server error." */
   if (!_mongoc_timestamp_empty (&stream->operation_time)) {
      _mongoc_timestamp_append (
         &stream->operation_time, &change_stream_doc, "startAtOperationTime");
   }

   if (stream->change_stream_type == MONGOC_CHANGE_STREAM_CLIENT) {
      bson_append_bool (&change_stream_doc, "allChangesForCluster", 20, true);
   }
   bson_append_document_end (&change_stream_stage, &change_stream_doc);
   bson_append_document_end (&pipeline, &change_stream_stage);

   /* Append user pipeline if it exists */
   if (bson_iter_init_find (&iter, &stream->pipeline_to_append, "pipeline") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      bson_iter_t child_iter;
      uint32_t key_int = 1; /* stage "0" is $changeStream; user stages follow */
      char buf[16];
      const char *key_str;

      BSON_ASSERT (bson_iter_recurse (&iter, &child_iter));
      while (bson_iter_next (&child_iter)) {
         /* the user pipeline may consist of invalid stages or non-documents.
          * append anyway, and rely on the server error. */
         size_t keyLen =
            bson_uint32_to_string (key_int, &key_str, buf, sizeof (buf));
         bson_append_value (
            &pipeline, key_str, (int) keyLen, bson_iter_value (&child_iter));
         ++key_int;
      }
   }

   bson_append_array_end (command, &pipeline);

   /* Add batch size if needed */
   bson_append_document_begin (command, "cursor", 6, &cursor_doc);
   if (stream->batch_size > 0) {
      bson_append_int32 (&cursor_doc, "batchSize", 9, stream->batch_size);
   }
   bson_append_document_end (command, &cursor_doc);
}
/* Fold one write command's server @reply into the cumulative @result.
 * @offset is the index of this batch's first operation within the whole
 * bulk operation; the server reports batch-relative indexes. */
void
_mongoc_write_result_merge (mongoc_write_result_t *result,   /* IN */
                            mongoc_write_command_t *command, /* IN */
                            const bson_t *reply,             /* IN */
                            uint32_t offset)
{
   int32_t server_index = 0;
   const bson_value_t *value;
   bson_iter_t iter;
   bson_iter_t citer;
   bson_iter_t ar;
   int32_t n_upserted = 0;
   int32_t affected = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      affected = bson_iter_int32 (&iter);
   }

   /* a non-empty writeErrors array marks the whole result failed */
   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &citer) &&
       bson_iter_next (&citer)) {
      result->failed = true;
   }

   /* "n" means something different per command type */
   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      result->nInserted += affected;
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += affected;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:

      /* server returns each upserted _id with its index into this batch
       * look for "upserted": [{"index": 4, "_id": ObjectId()}, ...] */
      if (bson_iter_init_find (&iter, reply, "upserted")) {
         if (BSON_ITER_HOLDS_ARRAY (&iter) &&
             (bson_iter_recurse (&iter, &ar))) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "index") &&
                   BSON_ITER_HOLDS_INT32 (&citer)) {
                  server_index = bson_iter_int32 (&citer);

                  /* re-recurse: "_id" may appear before or after "index" */
                  if (bson_iter_recurse (&ar, &citer) &&
                      bson_iter_find (&citer, "_id")) {
                     value = bson_iter_value (&citer);
                     _mongoc_write_result_append_upsert (
                        result, offset + server_index, value);
                     n_upserted++;
                  }
               }
            }
         }
         result->nUpserted += n_upserted;
         /*
          * XXX: The following addition to nMatched needs some checking.
          *      I'm highly skeptical of it.
          */
         result->nMatched += BSON_MAX (0, (affected - n_upserted));
      } else {
         result->nMatched += affected;
      }
      if (bson_iter_init_find (&iter, reply, "nModified") &&
          BSON_ITER_HOLDS_INT32 (&iter)) {
         result->nModified += bson_iter_int32 (&iter);
      }
      break;
   default:
      BSON_ASSERT (false);
      break;
   }

   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      /* merge the per-op errors, translating their indexes by offset */
      _mongoc_write_result_merge_arrays (
         offset, result, &result->writeErrors, &iter);
   }

   if (bson_iter_init_find (&iter, reply, "writeConcernError") &&
       BSON_ITER_HOLDS_DOCUMENT (&iter)) {
      uint32_t len;
      const uint8_t *data;
      bson_t write_concern_error;
      char str[16];
      const char *key;

      /* writeConcernError is a subdocument in the server response
       * append it to the result->writeConcernErrors array */
      bson_iter_document (&iter, &len, &data);
      bson_init_static (&write_concern_error, data, len);

      bson_uint32_to_string (
         result->n_writeConcernErrors, &key, str, sizeof str);

      if (!bson_append_document (
             &result->writeConcernErrors, key, -1, &write_concern_error)) {
         MONGOC_ERROR ("Error adding \"%s\" to writeConcernErrors.\n", key);
      }

      result->n_writeConcernErrors++;
   }

   /* inefficient if there are ever large numbers: for each label in each err,
    * we linear-search result->errorLabels to see if it's included yet */
   _mongoc_bson_array_copy_labels_to (reply, &result->errorLabels);

   EXIT;
}
/* Parse an ismaster response into @sd: record size/wire-version limits and
 * replica-set membership fields, then classify the server type from what
 * was gathered. Any field of the wrong BSON type (or ok:0) jumps to
 * failure and marks the server UNKNOWN. A NULL response just resets @sd. */
void
mongoc_server_description_handle_ismaster (
   mongoc_server_description_t *sd,
   const bson_t *ismaster_response,
   int64_t rtt_msec,
   bson_error_t *error)
{
   bson_iter_t iter;
   bool is_master = false;
   bool is_shard = false;
   bool is_secondary = false;
   bool is_arbiter = false;
   bool is_replicaset = false;
   bool is_hidden = false;
   const uint8_t *bytes;
   uint32_t len;
   int num_keys = 0;

   ENTRY;

   BSON_ASSERT (sd);

   mongoc_server_description_reset (sd);
   if (!ismaster_response) {
      EXIT;
   }

   /* keep our own copy: sd's string/array fields point into this copy */
   bson_destroy (&sd->last_is_master);
   bson_copy_to (ismaster_response, &sd->last_is_master);
   sd->has_is_master = true;

   bson_iter_init (&iter, &sd->last_is_master);

   while (bson_iter_next (&iter)) {
      num_keys++;
      if (strcmp ("ok", bson_iter_key (&iter)) == 0) {
         /* ismaster responses never have ok: 0, but spec requires we check */
         if (! bson_iter_as_bool (&iter)) goto failure;
      } else if (strcmp ("ismaster", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_master = bson_iter_bool (&iter);
      } else if (strcmp ("maxMessageSizeBytes", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_msg_size = bson_iter_int32 (&iter);
      } else if (strcmp ("maxBsonObjectSize", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_bson_obj_size = bson_iter_int32 (&iter);
      } else if (strcmp ("maxWriteBatchSize", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_write_batch_size = bson_iter_int32 (&iter);
      } else if (strcmp ("minWireVersion", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->min_wire_version = bson_iter_int32 (&iter);
      } else if (strcmp ("maxWireVersion", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_INT32 (&iter)) goto failure;
         sd->max_wire_version = bson_iter_int32 (&iter);
      } else if (strcmp ("msg", bson_iter_key (&iter)) == 0) {
         /* any "msg" string marks the server as a mongos */
         if (! BSON_ITER_HOLDS_UTF8 (&iter)) goto failure;
         is_shard = !!bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("setName", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_UTF8 (&iter)) goto failure;
         sd->set_name = bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("secondary", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_secondary = bson_iter_bool (&iter);
      } else if (strcmp ("hosts", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_ARRAY (&iter)) goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->hosts, bytes, len);
      } else if (strcmp ("passives", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_ARRAY (&iter)) goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->passives, bytes, len);
      } else if (strcmp ("arbiters", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_ARRAY (&iter)) goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->arbiters, bytes, len);
      } else if (strcmp ("primary", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_UTF8 (&iter)) goto failure;
         sd->current_primary = bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("arbiterOnly", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_arbiter = bson_iter_bool (&iter);
      } else if (strcmp ("isreplicaset", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_BOOL (&iter)) goto failure;
         is_replicaset = bson_iter_bool (&iter);
      } else if (strcmp ("tags", bson_iter_key (&iter)) == 0) {
         if (! BSON_ITER_HOLDS_DOCUMENT (&iter)) goto failure;
         bson_iter_document (&iter, &len, &bytes);
         bson_init_static (&sd->tags, bytes, len);
      } else if (strcmp ("hidden", bson_iter_key (&iter)) == 0) {
         /* note: no type check here, unlike the other bool fields */
         is_hidden = bson_iter_bool (&iter);
      }
   }

   /* classify the server type from the fields gathered above */
   if (is_shard) {
      sd->type = MONGOC_SERVER_MONGOS;
   } else if (sd->set_name) {
      if (is_hidden) {
         sd->type = MONGOC_SERVER_RS_OTHER;
      } else if (is_master) {
         sd->type = MONGOC_SERVER_RS_PRIMARY;
      } else if (is_secondary) {
         sd->type = MONGOC_SERVER_RS_SECONDARY;
      } else if (is_arbiter) {
         sd->type = MONGOC_SERVER_RS_ARBITER;
      } else {
         sd->type = MONGOC_SERVER_RS_OTHER;
      }
   } else if (is_replicaset) {
      sd->type = MONGOC_SERVER_RS_GHOST;
   } else if (num_keys > 0) {
      sd->type = MONGOC_SERVER_STANDALONE;
   } else {
      /* an empty response classifies the server as unknown */
      sd->type = MONGOC_SERVER_UNKNOWN;
   }

   mongoc_server_description_update_rtt(sd, rtt_msec);

   EXIT;

failure:
   sd->type = MONGOC_SERVER_UNKNOWN;
   sd->round_trip_time = -1;

   EXIT;
}
static void test_index_offset (void) { mongoc_bulk_operation_t *bulk; mongoc_collection_t *collection; mongoc_client_t *client; bson_error_t error; bson_iter_t iter; bson_iter_t ar; bson_iter_t citer; bson_t reply; bson_t *sel; bson_t *doc; bool r; client = mongoc_client_new (gTestUri); assert (client); collection = get_test_collection (client, "test_index_offset"); assert (collection); doc = bson_new (); BSON_APPEND_INT32 (doc, "abcd", 1234); r = mongoc_collection_insert (collection, MONGOC_INSERT_NONE, doc, NULL, &error); assert (r); bson_destroy (doc); bulk = mongoc_collection_create_bulk_operation (collection, true, NULL); assert (bulk); sel = BCON_NEW ("abcd", BCON_INT32 (1234)); doc = BCON_NEW ("$set", "{", "hello", "there", "}"); mongoc_bulk_operation_delete_one (bulk, sel); mongoc_bulk_operation_update (bulk, sel, doc, true); r = mongoc_bulk_operation_execute (bulk, &reply, &error); assert (r); assert (bson_iter_init_find (&iter, &reply, "nUpserted")); assert (BSON_ITER_HOLDS_INT32 (&iter)); assert (bson_iter_int32 (&iter) == 1); assert (bson_iter_init_find (&iter, &reply, "nMatched")); assert (BSON_ITER_HOLDS_INT32 (&iter)); assert (bson_iter_int32 (&iter) == 0); assert (bson_iter_init_find (&iter, &reply, "nRemoved")); assert (BSON_ITER_HOLDS_INT32 (&iter)); assert (bson_iter_int32 (&iter) == 1); assert (bson_iter_init_find (&iter, &reply, "nInserted")); assert (BSON_ITER_HOLDS_INT32 (&iter)); assert (bson_iter_int32 (&iter) == 0); if (bson_iter_init_find (&iter, &reply, "nModified")) { assert (BSON_ITER_HOLDS_INT32 (&iter)); assert (bson_iter_int32 (&iter) == 0); } assert (bson_iter_init_find (&iter, &reply, "upserted")); assert (BSON_ITER_HOLDS_ARRAY (&iter)); assert (bson_iter_recurse (&iter, &ar)); assert (bson_iter_next (&ar)); assert (BSON_ITER_HOLDS_DOCUMENT (&ar)); assert (bson_iter_recurse (&ar, &citer)); assert (bson_iter_next (&citer)); assert (BSON_ITER_IS_KEY (&citer, "index")); assert (bson_iter_int32 (&citer) == 1); assert (bson_iter_next 
(&citer)); assert (BSON_ITER_IS_KEY (&citer, "_id")); assert (BSON_ITER_HOLDS_OID (&citer)); assert (!bson_iter_next (&citer)); assert (!bson_iter_next (&ar)); assert (bson_iter_init_find (&iter, &reply, "writeErrors")); assert (BSON_ITER_HOLDS_ARRAY (&iter)); assert (bson_iter_recurse (&iter, &ar)); assert (!bson_iter_next (&ar)); bson_destroy (&reply); r = mongoc_collection_drop (collection, &error); assert (r); mongoc_bulk_operation_destroy (bulk); mongoc_collection_destroy (collection); mongoc_client_destroy (client); bson_destroy (doc); bson_destroy (sel); }
/**
 * _mongoc_gridfs_file_new_from_bson:
 *
 * Creates a gridfs file from a bson object.
 *
 * This is only really useful for instantiating a gridfs file from a server
 * side object.
 *
 * @gridfs: the owning gridfs handle (borrowed; the file does not own it).
 * @data:   the server-side "files" document. It is copied into file->bson,
 *          so the UTF-8 / array / document fields extracted below point
 *          into that private copy and stay valid for the file's lifetime.
 *
 * Returns a newly allocated mongoc_gridfs_file_t that the caller must
 * destroy, or NULL if a recognized key holds an unexpected BSON type.
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs,
                                   const bson_t *data)
{
   mongoc_gridfs_file_t *file;
   const bson_value_t *value;
   const char *key;
   bson_iter_t iter;
   const uint8_t *buf;
   uint32_t buf_len;

   ENTRY;

   BSON_ASSERT (gridfs);
   BSON_ASSERT (data);

   file = (mongoc_gridfs_file_t *)bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   bson_copy_to (data, &file->bson);

   bson_iter_init (&iter, &file->bson);

   while (bson_iter_next (&iter)) {
      key = bson_iter_key (&iter);

      if (0 == strcmp (key, "_id")) {
         /* _id may be any BSON type; deep-copy it into files_id. */
         value = bson_iter_value (&iter);
         bson_value_copy (value, &file->files_id);
      } else if (0 == strcmp (key, "length")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         file->length = bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "chunkSize")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         /* chunk_size is int32_t; reject values that would overflow it. */
         if (bson_iter_as_int64 (&iter) > INT32_MAX) {
            GOTO (failure);
         }
         file->chunk_size = (int32_t)bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "uploadDate")) {
         if (!BSON_ITER_HOLDS_DATE_TIME (&iter)){
            GOTO (failure);
         }
         file->upload_date = bson_iter_date_time (&iter);
      } else if (0 == strcmp (key, "md5")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_md5 = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "filename")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_filename = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "contentType")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_content_type = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "aliases")) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter)) {
            GOTO (failure);
         }
         /* Static view into file->bson; no separate destroy required. */
         bson_iter_array (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_aliases, buf, buf_len);
      } else if (0 == strcmp (key, "metadata")) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            GOTO (failure);
         }
         bson_iter_document (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_metadata, buf, buf_len);
      }
   }

   /* TODO: is there are a minimal object we should be verifying that we
    * actually have here? */

   RETURN (file);

failure:
   /* Fix: the previous code destroyed file->bson here but leaked the file
    * struct itself and any _id value already copied into files_id. The
    * struct was zeroed by bson_malloc0, so bson_value_destroy is a no-op
    * when "_id" was never seen. */
   bson_value_destroy (&file->files_id);
   bson_destroy (&file->bson);
   bson_free (file);

   RETURN (NULL);
}
/*
 * Fold one write-command server reply into the accumulated bulk result.
 *
 * @result:  running totals/errors for the whole bulk operation (IN/OUT).
 * @command: the command that produced @reply; its type decides which
 *           counter the reply's "n" is added to.
 * @reply:   the server reply document for this batch.
 * @offset:  index of this batch's first operation within the client's
 *           overall bulk; used to translate the per-batch "index" values
 *           in "upserted" and "writeErrors" back to bulk-wide indexes.
 */
void
_mongoc_write_result_merge (mongoc_write_result_t *result,   /* IN */
                            mongoc_write_command_t *command, /* IN */
                            const bson_t *reply,             /* IN */
                            uint32_t offset)
{
   int32_t server_index = 0;
   const bson_value_t *value;
   bson_iter_t iter;
   bson_iter_t citer;
   bson_iter_t ar;
   int32_t n_upserted = 0;
   int32_t affected = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      affected = bson_iter_int32 (&iter);
   }

   /* A non-empty "writeErrors" array marks the whole bulk as failed. */
   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter) &&
       bson_iter_recurse (&iter, &citer) &&
       bson_iter_next (&citer)) {
      result->failed = true;
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      result->nInserted += affected;
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += affected;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:
      /* server returns each upserted _id with its index into this batch
       * look for "upserted": [{"index": 4, "_id": ObjectId()}, ...] */
      if (bson_iter_init_find (&iter, reply, "upserted")) {
         if (BSON_ITER_HOLDS_ARRAY (&iter) &&
             (bson_iter_recurse (&iter, &ar))) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "index") &&
                   BSON_ITER_HOLDS_INT32 (&citer)) {
                  server_index = bson_iter_int32 (&citer);

                  /* re-recurse from the element start: "_id" may appear
                   * before "index" within the subdocument. */
                  if (bson_iter_recurse (&ar, &citer) &&
                      bson_iter_find (&citer, "_id")) {
                     value = bson_iter_value (&citer);
                     _mongoc_write_result_append_upsert (result,
                                                         offset + server_index,
                                                         value);
                     n_upserted++;
                  }
               }
            }
         }
         result->nUpserted += n_upserted;
         /*
          * XXX: The following addition to nMatched needs some checking.
          *      I'm highly skeptical of it.
          */
         result->nMatched += BSON_MAX (0, (affected - n_upserted));
      } else {
         result->nMatched += affected;
      }
      /*
       * SERVER-13001 - in a mixed sharded cluster a call to update could
       * return nModified (>= 2.6) or not (<= 2.4). If any call does not
       * return nModified we can't report a valid final count so omit the
       * field completely.
       */
      if (bson_iter_init_find (&iter, reply, "nModified") &&
          BSON_ITER_HOLDS_INT32 (&iter)) {
         result->nModified += bson_iter_int32 (&iter);
      } else {
         /*
          * nModified could be BSON_TYPE_NULL, which should also be omitted.
          */
         result->omit_nModified = true;
      }
      break;
   default:
      /* command->type is one of the three cases above by construction. */
      BSON_ASSERT (false);
      break;
   }

   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      _mongoc_write_result_merge_arrays (offset, result,
                                         &result->writeErrors, &iter);
   }

   if (bson_iter_init_find (&iter, reply, "writeConcernError") &&
       BSON_ITER_HOLDS_DOCUMENT (&iter)) {
      uint32_t len;
      const uint8_t *data;
      bson_t write_concern_error;
      char str[16];
      const char *key;

      /* writeConcernError is a subdocument in the server response
       * append it to the result->writeConcernErrors array */
      bson_iter_document (&iter, &len, &data);
      bson_init_static (&write_concern_error, data, len);

      bson_uint32_to_string (result->n_writeConcernErrors, &key,
                             str, sizeof str);

      bson_append_document (&result->writeConcernErrors, key, -1,
                            &write_concern_error);

      result->n_writeConcernErrors++;
   }

   EXIT;
}
/*
 * Fold a legacy (pre-write-command, getLastError-style) reply into the
 * accumulated bulk result.
 *
 * @result:       running totals/errors for the bulk operation (IN/OUT).
 * @command:      the command that produced @reply; selects the counter
 *                that "n" is added to.
 * @reply:        the legacy reply ({n, err, code, upserted, ...}).
 * @default_code: error code to report when the reply has "err" but no
 *                numeric "code".
 * @offset:       index of this batch's first operation within the whole
 *                bulk, used when recording errors and upserted ids.
 */
void
_mongoc_write_result_merge_legacy (mongoc_write_result_t *result,   /* IN */
                                   mongoc_write_command_t *command, /* IN */
                                   const bson_t *reply,             /* IN */
                                   mongoc_error_code_t default_code,
                                   uint32_t offset)
{
   const bson_value_t *value;
   bson_t holder, write_errors, child;
   bson_iter_t iter;
   bson_iter_t ar;
   bson_iter_t citer;
   const char *err = NULL;
   int32_t code = 0;
   int32_t n = 0;
   int32_t upsert_idx = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      n = bson_iter_int32 (&iter);
   }

   if (bson_iter_init_find (&iter, reply, "err") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      err = bson_iter_utf8 (&iter, NULL);
   }

   if (bson_iter_init_find (&iter, reply, "code") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      code = bson_iter_int32 (&iter);
   }

   if (code || err) {
      if (!code) {
         code = default_code;
      }
      if (!err) {
         err = "unknown error";
      }
      bson_set_error (&result->error,
                      MONGOC_ERROR_COLLECTION,
                      code,
                      "%s", err);
      result->failed = true;

      /* Synthesize a write-command-style writeErrors array,
       * [{index, code, errmsg}], so the legacy error can be merged through
       * the same path as modern replies. The literal key lengths
       * ("index"=5, "code"=4, "errmsg"=6) avoid strlen calls. */
      bson_init(&holder);
      bson_append_array_begin(&holder, "0", 1, &write_errors);
      bson_append_document_begin(&write_errors, "0", 1, &child);
      bson_append_int32(&child, "index", 5, 0);
      bson_append_int32(&child, "code", 4, code);
      bson_append_utf8(&child, "errmsg", 6, err, -1);
      bson_append_document_end(&write_errors, &child);
      bson_append_array_end(&holder, &write_errors);
      /* Position the iterator on the synthesized array element. */
      bson_iter_init(&iter, &holder);
      bson_iter_next(&iter);

      _mongoc_write_result_merge_arrays (offset, result,
                                         &result->writeErrors, &iter);

      bson_destroy(&holder);
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      if (n) {
         result->nInserted += n;
      }
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += n;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:
      /* A single upsert reports "upserted" as a scalar _id... */
      if (bson_iter_init_find (&iter, reply, "upserted") &&
          !BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;
         value = bson_iter_value (&iter);
         _mongoc_write_result_append_upsert (result, offset, value);
      } else if (bson_iter_init_find (&iter, reply, "upserted") &&
                 BSON_ITER_HOLDS_ARRAY (&iter)) {
         /* ...while a batch reports an array of {_id: ...} documents. */
         result->nUpserted += n;
         if (bson_iter_recurse (&iter, &ar)) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "_id")) {
                  value = bson_iter_value (&citer);
                  _mongoc_write_result_append_upsert (result,
                                                      offset + upsert_idx,
                                                      value);
                  upsert_idx++;
               }
            }
         }
      } else if ((n == 1) &&
                 bson_iter_init_find (&iter, reply, "updatedExisting") &&
                 BSON_ITER_HOLDS_BOOL (&iter) &&
                 !bson_iter_bool (&iter)) {
         /* n == 1 with updatedExisting:false means an upsert happened even
          * though no "upserted" field was returned (older servers). */
         result->nUpserted += n;
      } else {
         result->nMatched += n;
      }
      break;
   default:
      break;
   }

   /* Legacy replies never carry nModified, so no valid total exists. */
   result->omit_nModified = true;

   EXIT;
}
/*
 * Fold a legacy (pre-write-command, getLastError-style) reply into the
 * accumulated bulk result.
 *
 * @result:            running totals/errors for the bulk (IN/OUT).
 * @command:           the command that produced @reply; selects the
 *                     counter that "n" is added to.
 * @reply:             the legacy reply ({n, err, code, wtimeout, ...}).
 * @error_api_version: selects the error domain reported to callers
 *                     (MONGOC_ERROR_SERVER for API version >= 2).
 * @default_code:      fallback code when "err" is present without "code".
 * @offset:            index of this batch's first operation within the
 *                     whole bulk, for error and upsert bookkeeping.
 */
void
_mongoc_write_result_merge_legacy (mongoc_write_result_t *result,   /* IN */
                                   mongoc_write_command_t *command, /* IN */
                                   const bson_t *reply,             /* IN */
                                   int32_t error_api_version,
                                   mongoc_error_code_t default_code,
                                   uint32_t offset)
{
   const bson_value_t *value;
   bson_iter_t iter;
   bson_iter_t ar;
   bson_iter_t citer;
   const char *err = NULL;
   int32_t code = 0;
   int32_t n = 0;
   int32_t upsert_idx = 0;
   mongoc_error_domain_t domain;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   domain = error_api_version >= MONGOC_ERROR_API_VERSION_2
               ? MONGOC_ERROR_SERVER
               : MONGOC_ERROR_COLLECTION;

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      n = bson_iter_int32 (&iter);
   }

   if (bson_iter_init_find (&iter, reply, "err") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      err = bson_iter_utf8 (&iter, NULL);
   }

   if (bson_iter_init_find (&iter, reply, "code") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      code = bson_iter_int32 (&iter);
   }

   /* Normalize the server's various duplicate-key codes to one value. */
   if (_is_duplicate_key_error (code)) {
      code = MONGOC_ERROR_DUPLICATE_KEY;
   }

   if (code || err) {
      if (!err) {
         err = "unknown error";
      }

      /* wtimeout errors are write-concern errors, not write errors. */
      if (bson_iter_init_find (&iter, reply, "wtimeout") &&
          bson_iter_as_bool (&iter)) {

         if (!code) {
            code = (int32_t) MONGOC_ERROR_WRITE_CONCERN_ERROR;
         }

         _append_write_concern_err_legacy (result, err, code);
      } else {
         if (!code) {
            code = (int32_t) default_code;
         }

         _append_write_err_legacy (result, err, domain, code, offset);
      }
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      if (n) {
         result->nInserted += n;
      }
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += n;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:
      /* A single upsert reports "upserted" as a scalar _id... */
      if (bson_iter_init_find (&iter, reply, "upserted") &&
          !BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;
         value = bson_iter_value (&iter);
         _mongoc_write_result_append_upsert (result, offset, value);
      } else if (bson_iter_init_find (&iter, reply, "upserted") &&
                 BSON_ITER_HOLDS_ARRAY (&iter)) {
         /* ...while a batch reports an array of {_id: ...} documents. */
         result->nUpserted += n;

         if (bson_iter_recurse (&iter, &ar)) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "_id")) {
                  value = bson_iter_value (&citer);
                  _mongoc_write_result_append_upsert (
                     result, offset + upsert_idx, value);
                  upsert_idx++;
               }
            }
         }
      } else if ((n == 1) &&
                 bson_iter_init_find (&iter, reply, "updatedExisting") &&
                 BSON_ITER_HOLDS_BOOL (&iter) &&
                 !bson_iter_bool (&iter)) {
         /* n == 1 with updatedExisting:false means an upsert happened even
          * though no "upserted" field was returned (older servers). */
         result->nUpserted += n;
      } else {
         result->nMatched += n;
      }
      break;
   default:
      break;
   }

   /* Legacy replies never carry nModified, so no valid total exists. */
   result->omit_nModified = true;

   EXIT;
}
/* Deserialize a transaction from its BSON representation.
 *
 * Expects a document of the shape produced by the matching serializer:
 * {version: int32, inputs: [...], outputs: [...], lock_time: int32}.
 * Returns a newly allocated transaction, or NULL if any required field
 * is missing or has the wrong BSON type.
 *
 * NOTE(review): every `goto error` path leaks `tx` (from transaction_new)
 * and any input/output allocated in the current loop iteration — confirm
 * whether a transaction-free call belongs before `return NULL`. */
struct transaction* transaction_from_bson(bson_t const* doc)
{
    /* Array elements are addressed by stringified index ("0", "1", ...);
     * 9 bytes covers any 32-bit unsigned index plus the NUL. */
    char key[9];
    bson_iter_t iter;
    bson_iter_t subiter;

    struct transaction* tx = transaction_new();

    if(!bson_iter_init_find(&iter, doc, "version") || !BSON_ITER_HOLDS_INT32(&iter)) goto error;
    transaction_set_version(tx, bson_iter_int32(&iter));

    // Read Inputs
    if(!bson_iter_init_find(&iter, doc, "inputs") || !BSON_ITER_HOLDS_ARRAY(&iter)) goto error;

    uint32_t inputs_doc_length;
    uint8_t const* inputs_doc_data;
    bson_iter_array(&iter, &inputs_doc_length, &inputs_doc_data);

    /* Static view into `doc`'s buffer; no copy, nothing to destroy. */
    bson_t inputs_doc;
    bson_init_static(&inputs_doc, inputs_doc_data, inputs_doc_length);

    size_t index = 0;
    for(;;) {
        bson_snprintf(key, sizeof(key), "%u", (unsigned int)index);
        key[sizeof(key) - 1] = '\0';

        // If the array key isn't found, then we reached the end of the array
        if(!bson_iter_init_find(&subiter, &inputs_doc, key)) break;

        // If it's not a document, then there's an error
        if(!BSON_ITER_HOLDS_DOCUMENT(&subiter)) goto error;

        struct transaction_input* input = transaction_input_new();
        struct transaction_output_reference* output_reference = transaction_input_output_reference(input);

        // Load the input document
        bson_t element_doc;
        uint32_t element_doc_length;
        uint8_t const* element_doc_data;
        bson_iter_document(&subiter, &element_doc_length, &element_doc_data);
        bson_init_static(&element_doc, element_doc_data, element_doc_length);

        bson_iter_t elementiter;

        // Output reference: {hash: binary(32), index: int32}
        if(!bson_iter_init_find(&elementiter, &element_doc, "output_reference") || !BSON_ITER_HOLDS_DOCUMENT(&elementiter)) goto error;

        bson_t output_reference_doc;
        uint32_t output_reference_doc_length;
        uint8_t const* output_reference_doc_data;
        bson_iter_document(&elementiter, &output_reference_doc_length, &output_reference_doc_data);
        bson_init_static(&output_reference_doc, output_reference_doc_data, output_reference_doc_length);

        bson_iter_t output_reference_iter;
        uint8_t const* hash;
        uint32_t hash_size;
        if(!bson_iter_init_find(&output_reference_iter, &output_reference_doc, "hash") || !BSON_ITER_HOLDS_BINARY(&output_reference_iter)) goto error;
        bson_iter_binary(&output_reference_iter, BSON_SUBTYPE_BINARY, &hash_size, &hash);
        /* NOTE(review): asserting on externally supplied data aborts the
         * process on a malformed document — consider `goto error`. */
        assert(hash_size == 32);
        transaction_output_reference_set_hash(output_reference, (unsigned char const*)hash);

        if(!bson_iter_init_find(&output_reference_iter, &output_reference_doc, "index") || !BSON_ITER_HOLDS_INT32(&output_reference_iter)) goto error;
        transaction_output_reference_set_index(output_reference, bson_iter_int32(&output_reference_iter));

        // Script
        if(!bson_iter_init_find(&elementiter, &element_doc, "script") || !BSON_ITER_HOLDS_BINARY(&elementiter)) goto error;

        uint32_t script_size;
        uint8_t const* script_data;
        bson_iter_binary(&elementiter, BSON_SUBTYPE_BINARY, &script_size, &script_data);

        struct script* script;
        size_t script_size_result;
        /* NOTE(review): script_size is passed twice — verify against the
         * unserialize_script signature (size in vs. size limit?). */
        script_size_result = unserialize_script((unsigned char const*)script_data, script_size, &script, script_size);
        assert(script_size_result == script_size);
        transaction_input_set_script(input, script);

        // Sequence
        if(!bson_iter_init_find(&elementiter, &element_doc, "sequence") || !BSON_ITER_HOLDS_INT32(&elementiter)) goto error;
        transaction_input_set_sequence(input, bson_iter_int32(&elementiter));

        /* Ownership of `input` transfers to the transaction here. */
        transaction_add_input(tx, input);
        index += 1;
    }

    // Read Outputs
    if(!bson_iter_init_find(&iter, doc, "outputs") || !BSON_ITER_HOLDS_ARRAY(&iter)) goto error;

    uint32_t outputs_doc_length;
    uint8_t const* outputs_doc_data;
    bson_iter_array(&iter, &outputs_doc_length, &outputs_doc_data);

    bson_t outputs_doc;
    bson_init_static(&outputs_doc, outputs_doc_data, outputs_doc_length);

    index = 0;
    for(;;) {
        bson_snprintf(key, sizeof(key), "%u", (unsigned int)index);
        key[sizeof(key) - 1] = '\0';

        // If the array key isn't found, then we reached the end of the array
        if(!bson_iter_init_find(&subiter, &outputs_doc, key)) break;

        // If it's not a document, then there's an error
        if(!BSON_ITER_HOLDS_DOCUMENT(&subiter)) goto error;

        struct transaction_output* output = transaction_output_new();

        // Load the output document
        bson_t element_doc;
        uint32_t element_doc_length;
        uint8_t const* element_doc_data;
        bson_iter_document(&subiter, &element_doc_length, &element_doc_data);
        bson_init_static(&element_doc, element_doc_data, element_doc_length);

        bson_iter_t elementiter;

        // Value
        if(!bson_iter_init_find(&elementiter, &element_doc, "value") || !BSON_ITER_HOLDS_INT64(&elementiter)) goto error;
        transaction_output_set_value(output, bson_iter_int64(&elementiter));

        // Script
        if(!bson_iter_init_find(&elementiter, &element_doc, "script") || !BSON_ITER_HOLDS_BINARY(&elementiter)) goto error;

        uint32_t script_size;
        uint8_t const* script_data;
        bson_iter_binary(&elementiter, BSON_SUBTYPE_BINARY, &script_size, &script_data);

        struct script* script;
        size_t script_size_result;
        script_size_result = unserialize_script((unsigned char const*)script_data, script_size, &script, script_size);
        assert(script_size_result == script_size);
        transaction_output_set_script(output, script);

        /* Ownership of `output` transfers to the transaction here. */
        transaction_add_output(tx, output);
        index += 1;
    }

    if(!bson_iter_init_find(&iter, doc, "lock_time") || !BSON_ITER_HOLDS_INT32(&iter)) goto error;
    transaction_set_lock_time(tx, bson_iter_int32(&iter));

    return tx;

error:
    return NULL;
}
/* Mock-server request hook: verify an OP_QUERY carries
 * $readPreference {mode: "secondaryPreferred", tags: [{dc: "ny"}, {}]},
 * send an empty reply, and record success through @user_data. */
static void
read_prefs_handler (mock_server_t   *server,
                    mongoc_stream_t *stream,
                    mongoc_rpc_t    *rpc,
                    void            *user_data)
{
   bool *success = user_data;
   int32_t doc_len;
   bson_iter_t top;
   bson_iter_t pref_iter;
   bson_iter_t tags_iter;
   bson_iter_t tag_field;
   bson_t query;
   bson_t reply = BSON_INITIALIZER;
   int ok;

   if (rpc->header.opcode != MONGOC_OPCODE_QUERY) {
      return;
   }

   /* The embedded query document starts with its little-endian length. */
   memcpy (&doc_len, rpc->query.query, 4);
   doc_len = BSON_UINT32_FROM_LE (doc_len);

   ok = bson_init_static (&query, rpc->query.query, doc_len);
   assert (ok);

   ok = bson_iter_init_find (&top, &query, "$query");
   assert (ok);
   assert (BSON_ITER_HOLDS_DOCUMENT (&top));

   ok = bson_iter_init_find (&top, &query, "$readPreference");
   assert (ok);
   assert (BSON_ITER_HOLDS_DOCUMENT (&top));

   ok = bson_iter_recurse (&top, &pref_iter);
   assert (ok);

   /* First field: mode == "secondaryPreferred". */
   ok = bson_iter_next (&pref_iter);
   assert (ok);
   assert (BSON_ITER_HOLDS_UTF8 (&pref_iter));
   assert (!strcmp ("mode", bson_iter_key (&pref_iter)));
   assert (!strcmp ("secondaryPreferred", bson_iter_utf8 (&pref_iter, NULL)));

   /* Second field: the tags array. */
   ok = bson_iter_next (&pref_iter);
   assert (ok);
   assert (BSON_ITER_HOLDS_ARRAY (&pref_iter));

   ok = bson_iter_recurse (&pref_iter, &tags_iter);
   assert (ok);

   /* First tag set: exactly {dc: "ny"}. */
   ok = bson_iter_next (&tags_iter);
   assert (ok);
   assert (BSON_ITER_HOLDS_DOCUMENT (&tags_iter));
   ok = bson_iter_recurse (&tags_iter, &tag_field);
   assert (ok);
   ok = bson_iter_next (&tag_field);
   assert (ok);
   assert (BSON_ITER_HOLDS_UTF8 (&tag_field));
   assert (!strcmp ("dc", bson_iter_key (&tag_field)));
   assert (!strcmp ("ny", bson_iter_utf8 (&tag_field, NULL)));
   ok = bson_iter_next (&tag_field);
   assert (!ok);

   /* Second tag set: the empty document (match anything). */
   ok = bson_iter_next (&tags_iter);
   assert (ok);
   ok = bson_iter_recurse (&tags_iter, &tag_field);
   assert (ok);
   ok = bson_iter_next (&tag_field);
   assert (!ok);

   mock_server_reply_simple (server, stream, rpc, MONGOC_REPLY_NONE, &reply);

   *success = true;
}