/*
 * Return the long int value (IP, port or block number) stored under key
 * `claveRequerida` for copy `numDeCopia` of block `numBloque` in an archive
 * document.  It looks up the dotted BSON path
 * "Bloques.<numBloque>.copias.<numDeCopia-1>.<claveRequerida>"
 * (copies are stored 0-based, callers pass them 1-based).
 *
 * Returns 0 when the document is NULL, the path does not exist, or the value
 * is not an int32.
 *
 * Fixes: the result used to be returned uninitialized on the no-match path
 * (undefined behavior), and both string_itoa() temporaries were leaked.
 */
long int getValorDeUnaCopia_ArchivoDB (const bson_t * documento_X, int numBloque, int numDeCopia, char * claveRequerida)
{
    bson_iter_t punteroDeBusqueda;
    bson_iter_t valorEncontrado;
    long int valorRequerido = 0;  /* fix: defined value when the key is absent */

    /* string_itoa() allocates (commons library) — keep the pointers so they
     * can be freed below.  TODO confirm against commons' string_itoa docs. */
    char *bloqueStr = string_itoa(numBloque);
    char *copiaStr = string_itoa(numDeCopia - 1);

    /* Build the search key, e.g. "Bloques.3.copias.0.IP". */
    char *claveGenerada = string_new();
    string_append(&claveGenerada, "Bloques");
    string_append(&claveGenerada, ".");
    string_append(&claveGenerada, bloqueStr);
    string_append(&claveGenerada, ".copias.");
    string_append(&claveGenerada, copiaStr);
    string_append(&claveGenerada, ".");
    string_append(&claveGenerada, claveRequerida);

    if (documento_X != NULL &&
        bson_iter_init (&punteroDeBusqueda, documento_X) &&
        bson_iter_find_descendant (&punteroDeBusqueda, claveGenerada, &valorEncontrado) &&
        BSON_ITER_HOLDS_INT32 (&valorEncontrado)) {
        valorRequerido = bson_iter_int32 (&valorEncontrado);
    }

    free(bloqueStr);
    free(copiaStr);
    free(claveGenerada);
    return valorRequerido;
}
//Devuelve un num positivo o Devuelve 0 si hay error int getCantidadDeCopiasDe1Bloque_ArchivoDB(const bson_t * documento_X, int numBloque) { bson_iter_t punteroDeBusqueda; bson_iter_t valorEncontrado; int CantidadDeCopias = 0; char *claveGenerada = string_new(); string_append(&claveGenerada, "Bloques"); string_append(&claveGenerada, "."); string_append(&claveGenerada, string_itoa(numBloque)); string_append(&claveGenerada, "."); string_append(&claveGenerada, "cantidadDeCopias"); if (documento_X != NULL && bson_iter_init (&punteroDeBusqueda, documento_X) && bson_iter_find_descendant (&punteroDeBusqueda, claveGenerada, &valorEncontrado) && BSON_ITER_HOLDS_INT32 (&valorEncontrado)) { CantidadDeCopias = bson_iter_int32 (&valorEncontrado); } free(claveGenerada); return CantidadDeCopias; }
/*
 * A default-constructed write concern must not emit "fsync", "j" or "w" in
 * either its GLE command or its plain BSON representation; the GLE command
 * carries only getlasterror:1.
 */
static void
test_write_concern_bson_omits_defaults (void)
{
   mongoc_write_concern_t *write_concern;
   const bson_t *gle;
   const bson_t *bson;
   bson_iter_t iter;

   write_concern = mongoc_write_concern_new();

   /*
    * Check generated bson.
    */
   ASSERT(write_concern);
   gle = _mongoc_write_concern_get_gle(write_concern);
   ASSERT(bson_iter_init_find(&iter, gle, "getlasterror") &&
          BSON_ITER_HOLDS_INT32(&iter) &&
          bson_iter_int32(&iter) == 1);
   ASSERT(!bson_iter_init_find(&iter, gle, "fsync"));
   ASSERT(!bson_iter_init_find(&iter, gle, "j"));
   ASSERT(!bson_iter_init_find(&iter, gle, "w"));
   ASSERT(gle);

   bson = _mongoc_write_concern_get_bson(write_concern);
   ASSERT(!bson_iter_init_find(&iter, bson, "getlasterror"));
   ASSERT(!bson_iter_init_find(&iter, bson, "fsync"));
   ASSERT(!bson_iter_init_find(&iter, bson, "j"));
   /* fix: this assertion previously re-checked `gle` instead of `bson`,
    * so the absence of "w" in the plain document was never verified. */
   ASSERT(!bson_iter_init_find(&iter, bson, "w"));
   ASSERT(bson);

   mongoc_write_concern_destroy(write_concern);
}
static void _mongoc_populate_query_error (const bson_t *doc, int32_t error_api_version, bson_error_t *error) { mongoc_error_domain_t domain = error_api_version >= MONGOC_ERROR_API_VERSION_2 ? MONGOC_ERROR_SERVER : MONGOC_ERROR_QUERY; uint32_t code = MONGOC_ERROR_QUERY_FAILURE; bson_iter_t iter; const char *msg = "Unknown query failure"; ENTRY; BSON_ASSERT (doc); if (bson_iter_init_find (&iter, doc, "code") && BSON_ITER_HOLDS_INT32 (&iter)) { code = (uint32_t) bson_iter_int32 (&iter); } if (bson_iter_init_find (&iter, doc, "$err") && BSON_ITER_HOLDS_UTF8 (&iter)) { msg = bson_iter_utf8 (&iter, NULL); } bson_set_error (error, domain, code, "%s", msg); EXIT; }
static void test_write_concern_bson_includes_false_fsync_and_journal (void) { mongoc_write_concern_t *write_concern; const bson_t *gle; const bson_t *bson; bson_iter_t iter; write_concern = mongoc_write_concern_new(); /* * Check generated bson. */ ASSERT(write_concern); mongoc_write_concern_set_fsync(write_concern, false); mongoc_write_concern_set_journal(write_concern, false); gle = _mongoc_write_concern_get_gle(write_concern); ASSERT(bson_iter_init_find(&iter, gle, "getlasterror") && BSON_ITER_HOLDS_INT32(&iter) && bson_iter_int32(&iter) == 1); ASSERT(bson_iter_init_find(&iter, gle, "fsync") && BSON_ITER_HOLDS_BOOL(&iter) && !bson_iter_bool(&iter)); ASSERT(bson_iter_init_find(&iter, gle, "j") && BSON_ITER_HOLDS_BOOL(&iter) && !bson_iter_bool(&iter)); ASSERT(!bson_iter_init_find(&iter, gle, "w")); ASSERT(gle); bson = _mongoc_write_concern_get_bson(write_concern); ASSERT(!bson_iter_init_find(&iter, bson, "getlasterror")); ASSERT(bson_iter_init_find(&iter, bson, "fsync") && BSON_ITER_HOLDS_BOOL(&iter) && !bson_iter_bool(&iter)); ASSERT(bson_iter_init_find(&iter, bson, "j") && BSON_ITER_HOLDS_BOOL(&iter) && !bson_iter_bool(&iter)); ASSERT(!bson_iter_init_find(&iter, bson, "w")); ASSERT(bson); mongoc_write_concern_destroy(write_concern); }
/*
 * Convert a user-supplied "validate" option into bson_validate_flags_t.
 * Accepts either a boolean (only `false`, meaning "no validation") or an
 * int32 bitmask of bson_validate_flags_t values (<= 0x1F).
 *
 * Returns true and sets *flags on success.  On any invalid input the
 * CONVERSION_ERR macro reports through *error — NOTE(review): the lack of a
 * trailing return implies CONVERSION_ERR expands to `return false`; confirm
 * against its definition.  `client` is unused here.
 */
bool
_mongoc_convert_validate_flags (mongoc_client_t *client, const bson_iter_t *iter, bson_validate_flags_t *flags, bson_error_t *error)
{
   if (BSON_ITER_HOLDS_BOOL (iter)) {
      if (!bson_iter_as_bool (iter)) {
         /* validate: false — skip validation entirely. */
         *flags = BSON_VALIDATE_NONE;
         return true;
      } else {
         /* validate: false is ok but validate: true is prohibited */
         CONVERSION_ERR ("Invalid option \"%s\": true, must be a bitwise-OR of"
                         " bson_validate_flags_t values.",
                         bson_iter_key (iter));
      }
   } else if (BSON_ITER_HOLDS_INT32 (iter)) {
      /* 0x1F is the OR of all defined bson_validate_flags_t bits. */
      if (bson_iter_int32 (iter) <= 0x1F) {
         *flags = (bson_validate_flags_t) bson_iter_int32 (iter);
         return true;
      } else {
         CONVERSION_ERR ("Invalid field \"%s\" in opts, must be a bitwise-OR of"
                         " bson_validate_flags_t values.",
                         bson_iter_key (iter));
      }
   }

   /* Any other BSON type is rejected outright. */
   CONVERSION_ERR ("Invalid type for option \"%s\": \"%s\"."
                   " \"%s\" must be a boolean or a bitwise-OR of"
                   " bson_validate_flags_t values.",
                   bson_iter_key (iter),
                   _mongoc_bson_type_to_str (bson_iter_type (iter)),
                   bson_iter_key (iter));
}
/*
 * Set an int32 URI option by name.  Rejects options not registered as int32.
 * An existing int32 value is overwritten in place; an existing value of any
 * other type makes the call fail.  Returns true on success.
 */
bool
mongoc_uri_set_option_as_int32 (mongoc_uri_t *uri, const char *option, int32_t value)
{
   const bson_t *opts;
   bson_iter_t it;

   BSON_ASSERT (option);

   if (!mongoc_uri_option_is_int32 (option)) {
      return false;
   }

   opts = mongoc_uri_get_options (uri);
   if (opts && bson_iter_init_find_case (&it, opts, option)) {
      /* Already present: only an int32 slot can be overwritten in place. */
      if (!BSON_ITER_HOLDS_INT32 (&it)) {
         return false;
      }
      bson_iter_overwrite_int32 (&it, value);
      return true;
   }

   /* Not present yet: append a fresh int32 field. */
   bson_append_int32 (&uri->options, option, -1, value);
   return true;
}
/*
 * bson_iter_overwrite_int32() must mutate the value in place: after
 * overwriting, re-finding the key yields the new int32.
 */
static void
test_bson_iter_overwrite_int32 (void)
{
   bson_t doc;
   bson_iter_t it;

   bson_init(&doc);
   assert(bson_append_int32(&doc, "key", -1, 1234));

   /* Locate the field and overwrite it in place. */
   assert(bson_iter_init_find(&it, &doc, "key"));
   assert(BSON_ITER_HOLDS_INT32(&it));
   bson_iter_overwrite_int32(&it, 4321);

   /* Re-find and confirm the stored value changed. */
   assert(bson_iter_init_find(&it, &doc, "key"));
   assert(BSON_ITER_HOLDS_INT32(&it));
   assert_cmpint(bson_iter_int32(&it), ==, 4321);

   bson_destroy(&doc);
}
static void _mongoc_cursor_populate_error (mongoc_cursor_t *cursor, const bson_t *doc, bson_error_t *error) { bson_uint32_t code = MONGOC_ERROR_QUERY_FAILURE; bson_iter_t iter; const char *msg = "Unknown query failure"; BSON_ASSERT (cursor); BSON_ASSERT (doc); BSON_ASSERT (error); if (bson_iter_init_find (&iter, doc, "code") && BSON_ITER_HOLDS_INT32 (&iter)) { code = bson_iter_int32 (&iter); } if (bson_iter_init_find (&iter, doc, "$err") && BSON_ITER_HOLDS_UTF8 (&iter)) { msg = bson_iter_utf8 (&iter, NULL); } if (cursor->is_command && bson_iter_init_find (&iter, doc, "errmsg") && BSON_ITER_HOLDS_UTF8 (&iter)) { msg = bson_iter_utf8 (&iter, NULL); } bson_set_error(error, MONGOC_ERROR_QUERY, code, "%s", msg); }
/*
 * Exercise mongoc_write_concern_t end to end: default values, the fsync /
 * journal / w / wtimeout / wmajority accessors, and the frozen BSON
 * representation.  (Legacy variant: uses TRUE/FALSE and
 * _mongoc_write_concern_freeze.)
 */
static void
test_write_concern_basic (void)
{
   mongoc_write_concern_t *write_concern;
   const bson_t *b;
   bson_iter_t iter;

   write_concern = mongoc_write_concern_new();

   /*
    * Test defaults.
    */
   assert(write_concern);
   assert(!mongoc_write_concern_get_fsync(write_concern));
   assert(!mongoc_write_concern_get_journal(write_concern));
   assert(mongoc_write_concern_get_w(write_concern) == MONGOC_WRITE_CONCERN_W_DEFAULT);
   assert(!mongoc_write_concern_get_wtimeout(write_concern));
   assert(!mongoc_write_concern_get_wmajority(write_concern));

   /* fsync and journal flags must round-trip through their setters. */
   mongoc_write_concern_set_fsync(write_concern, TRUE);
   assert(mongoc_write_concern_get_fsync(write_concern));
   mongoc_write_concern_set_fsync(write_concern, FALSE);
   assert(!mongoc_write_concern_get_fsync(write_concern));
   mongoc_write_concern_set_journal(write_concern, TRUE);
   assert(mongoc_write_concern_get_journal(write_concern));
   mongoc_write_concern_set_journal(write_concern, FALSE);
   assert(!mongoc_write_concern_get_journal(write_concern));

   /*
    * Test changes to w.
    */
   mongoc_write_concern_set_w(write_concern, MONGOC_WRITE_CONCERN_W_MAJORITY);
   assert(mongoc_write_concern_get_wmajority(write_concern));
   mongoc_write_concern_set_w(write_concern, MONGOC_WRITE_CONCERN_W_DEFAULT);
   assert(!mongoc_write_concern_get_wmajority(write_concern));
   /* set_wmajority(1000) implies both wmajority and a 1000ms wtimeout. */
   mongoc_write_concern_set_wmajority(write_concern, 1000);
   assert(mongoc_write_concern_get_wmajority(write_concern));
   assert(mongoc_write_concern_get_wtimeout(write_concern) == 1000);
   mongoc_write_concern_set_wtimeout(write_concern, 0);
   assert(!mongoc_write_concern_get_wtimeout(write_concern));
   mongoc_write_concern_set_w(write_concern, MONGOC_WRITE_CONCERN_W_DEFAULT);
   assert(mongoc_write_concern_get_w(write_concern) == MONGOC_WRITE_CONCERN_W_DEFAULT);
   mongoc_write_concern_set_w(write_concern, 3);
   assert(mongoc_write_concern_get_w(write_concern) == 3);

   /*
    * Check generated bson.
    */
   mongoc_write_concern_set_fsync(write_concern, TRUE);
   mongoc_write_concern_set_journal(write_concern, TRUE);
   /* The frozen document carries fsync/j as booleans and w as int32. */
   b = _mongoc_write_concern_freeze(write_concern);
   assert(bson_iter_init_find(&iter, b, "fsync") && BSON_ITER_HOLDS_BOOL(&iter) && bson_iter_bool(&iter));
   assert(bson_iter_init_find(&iter, b, "j") && BSON_ITER_HOLDS_BOOL(&iter) && bson_iter_bool(&iter));
   assert(bson_iter_init_find(&iter, b, "w") && BSON_ITER_HOLDS_INT32(&iter) && bson_iter_int32(&iter) == 3);
   assert(b);

   mongoc_write_concern_destroy(write_concern);
}
/*
 * Fetch one OAuth key record by `kid` from the MongoDB "oauth_key"
 * collection and fill *key.  The projection limits the reply to the four
 * fields read below.  Returns 0 on success, -1 on failure (no collection,
 * query error, or no matching document).  *key is zeroed and its kid set
 * even on failure.
 */
static int mongo_get_oauth_key(const u08bits *kid, oauth_key_data_raw *key) {

  mongoc_collection_t * collection = mongo_get_collection("oauth_key");

  if (!collection)
    return -1;

  /* Query: { kid: <kid> } */
  bson_t query;
  bson_init(&query);
  BSON_APPEND_UTF8(&query, "kid", (const char *)kid);

  /* Projection: only the fields we consume. */
  bson_t fields;
  bson_init(&fields);
  BSON_APPEND_INT32(&fields, "lifetime", 1);
  BSON_APPEND_INT32(&fields, "timestamp", 1);
  BSON_APPEND_INT32(&fields, "as_rs_alg", 1);
  BSON_APPEND_INT32(&fields, "ikm_key", 1);

  mongoc_cursor_t * cursor;
  /* limit=1: at most one matching key document is consumed. */
  cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 1, 0, &query, &fields, NULL);

  int ret = -1;

  /* Pre-fill the output so callers get a zeroed record plus the kid
   * regardless of the lookup outcome. */
  ns_bzero(key,sizeof(oauth_key_data_raw));
  STRCPY(key->kid,kid);

  if (!cursor) {
    TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Error querying MongoDB collection 'oauth_key'\n");
  } else {
    const bson_t * item;
    uint32_t length;
    bson_iter_t iter;

    if (mongoc_cursor_next(cursor, &item)) {
      /* Each field is looked up with a fresh iterator init so field order
       * in the document does not matter. */
      if (bson_iter_init(&iter, item) && bson_iter_find(&iter, "as_rs_alg") && BSON_ITER_HOLDS_UTF8(&iter)) {
        STRCPY(key->as_rs_alg,bson_iter_utf8(&iter, &length));
      }
      if (bson_iter_init(&iter, item) && bson_iter_find(&iter, "ikm_key") && BSON_ITER_HOLDS_UTF8(&iter)) {
        STRCPY(key->ikm_key,bson_iter_utf8(&iter, &length));
      }
      /* timestamp is stored as int64, lifetime as int32. */
      if (bson_iter_init(&iter, item) && bson_iter_find(&iter, "timestamp") && BSON_ITER_HOLDS_INT64(&iter)) {
        key->timestamp = (u64bits)bson_iter_int64(&iter);
      }
      if (bson_iter_init(&iter, item) && bson_iter_find(&iter, "lifetime") && BSON_ITER_HOLDS_INT32(&iter)) {
        key->lifetime = (u32bits)bson_iter_int32(&iter);
      }
      ret = 0;
    }
    mongoc_cursor_destroy(cursor);
  }

  mongoc_collection_destroy(collection);
  bson_destroy(&query);
  bson_destroy(&fields);
  return ret;
}
mongoc_client_pool_t * mongoc_client_pool_new (const mongoc_uri_t *uri) { mongoc_topology_t *topology; mongoc_client_pool_t *pool; const bson_t *b; bson_iter_t iter; ENTRY; BSON_ASSERT (uri); pool = (mongoc_client_pool_t *)bson_malloc0(sizeof *pool); mongoc_mutex_init(&pool->mutex); _mongoc_queue_init(&pool->queue); pool->uri = mongoc_uri_copy(uri); pool->min_pool_size = 0; pool->max_pool_size = 100; pool->size = 0; topology = mongoc_topology_new(uri, false); pool->topology = topology; b = mongoc_uri_get_options(pool->uri); if (bson_iter_init_find_case(&iter, b, "minpoolsize")) { if (BSON_ITER_HOLDS_INT32(&iter)) { pool->min_pool_size = BSON_MAX(0, bson_iter_int32(&iter)); } } if (bson_iter_init_find_case(&iter, b, "maxpoolsize")) { if (BSON_ITER_HOLDS_INT32(&iter)) { pool->max_pool_size = BSON_MAX(1, bson_iter_int32(&iter)); } } mongoc_counter_client_pools_active_inc(); RETURN(pool); }
int database_find_blockchain_transaction(struct database* db, unsigned char* hash, size_t max_height, struct transaction** tx, size_t* height) { mongoc_collection_t* collection = mongoc_client_get_collection(db->client, database_name(db), "transactions"); // Build a query doc bson_t* query = bson_new(); // Set the hash BSON_APPEND_BINARY(query, "hash", BSON_SUBTYPE_BINARY, (uint8_t*)hash, 32); // Force the height to be valid (on the main chain) bson_t* height_doc = bson_new(); BSON_APPEND_DOCUMENT_BEGIN(query, "height", height_doc); BSON_APPEND_INT32(height_doc, "$lte", (int)max_height); BSON_APPEND_INT32(height_doc, "$gte", 0); bson_append_document_end(query, height_doc); // Perform find mongoc_cursor_t* cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, query, NULL, NULL); bson_error_t error; if(cursor == NULL || mongoc_cursor_error(cursor, &error)) { printf("MongoDB error: %s\n", (cursor == NULL) ? "NULL cursor" : error.message); return -1; } bson_t const* doc; int found = 0; while(mongoc_cursor_next(cursor, &doc) != 0) { if(height != NULL) { bson_iter_t iter; if(!bson_iter_init_find(&iter, doc, "height") || !BSON_ITER_HOLDS_INT32(&iter)) { printf("MongoDB error: tx doesn't have height!\n"); return -1; } *height = (size_t)bson_iter_int32(&iter); } if(tx != NULL) { *tx = transaction_from_bson(doc); } found = 1; break; } mongoc_cursor_destroy(cursor); bson_destroy(height_doc); bson_destroy(query); mongoc_collection_destroy(collection); return found; }
/*
 * Iterate a document containing a mix of BSON types (code, code-with-scope,
 * int32, int64, datetime, decimal128) and verify the iterator reports each
 * type in order, plus spot-check values via init_find.
 */
static void
test_bson_iter_mixed (void)
{
   bson_iter_t iter;
   bson_decimal128_t iter_value;
   bson_decimal128_t value;
   bson_t *b;
   bson_t *b2;

   b = bson_new();
   b2 = bson_new();

   /* decimal128 fixture: the 128-bit value 1. */
   value.high = 0;
   value.low = 1;

   /* b2 serves as the scope document for the code-with-scope element. */
   assert(bson_append_utf8(b2, "foo", -1, "bar", -1));
   assert(bson_append_code(b, "0", -1, "var a = {};"));
   assert(bson_append_code_with_scope(b, "1", -1, "var b = {};", b2));
   assert(bson_append_int32(b, "2", -1, 1234));
   assert(bson_append_int64(b, "3", -1, 4567));
   assert(bson_append_time_t(b, "4", -1, 123456));
   assert(bson_append_decimal128(b, "5", -1, &value));

   /* Walk the document and check each element's reported type. */
   assert(bson_iter_init(&iter, b));
   assert(bson_iter_next(&iter));
   assert(BSON_ITER_HOLDS_CODE(&iter));
   assert(bson_iter_next(&iter));
   assert(BSON_ITER_HOLDS_CODEWSCOPE(&iter));
   assert(bson_iter_next(&iter));
   assert(BSON_ITER_HOLDS_INT32(&iter));
   assert(bson_iter_next(&iter));
   assert(BSON_ITER_HOLDS_INT64(&iter));
   assert(bson_iter_next(&iter));
   assert(BSON_ITER_HOLDS_DATE_TIME(&iter));
   assert(bson_iter_next(&iter));
   assert(BSON_ITER_HOLDS_DECIMAL128(&iter));
   assert(!bson_iter_next(&iter));

   /* Jump to key "3" and continue iterating from there. */
   assert(bson_iter_init_find(&iter, b, "3"));
   assert(!strcmp(bson_iter_key(&iter), "3"));
   assert(bson_iter_int64(&iter) == 4567);
   assert(bson_iter_next(&iter));
   assert(BSON_ITER_HOLDS_DATE_TIME(&iter));
   /* time_t is seconds; date_time is milliseconds. */
   assert(bson_iter_time_t(&iter) == 123456);
   assert(bson_iter_date_time(&iter) == 123456000);
   assert(bson_iter_next(&iter));

   /* This test uses memcmp because libbson lacks decimal128 comparison. */
   bson_iter_decimal128(&iter, &iter_value);
   assert(memcmp(&iter_value, &value, sizeof(value)) == 0);

   assert(!bson_iter_next(&iter));
   bson_destroy(b);
   bson_destroy(b2);
}
/*
 * Parse the "id" field of a connect command frame.  Stores the value in
 * cmd->id and returns TRUE when the iterator holds an int32; returns FALSE
 * otherwise (key is unused beyond the precondition check).
 */
static gboolean
sim_parser_connect_id (bson_iter_t *piter, const char *key, SimCommand *cmd)
{
  g_return_val_if_fail (piter != NULL, FALSE);
  g_return_val_if_fail (cmd != NULL, FALSE);
  g_return_val_if_fail (key != NULL, FALSE);

  if (!BSON_ITER_HOLDS_INT32 (piter))
    return FALSE;

  cmd->id = bson_iter_int32 (piter);
  return TRUE;
}
/************************************************************************************************************************* F_SECUNDARIA: Obteniendo CANTIDAD TOTAL DE BLOQUES que conforman un Archivo TESTEADO =) ****************************/ int getCantidadTotalDeBloques_ArchivoDB(const bson_t * documento_X) //Devuelve un num positivo o Devuelve 0 si hay error { bson_iter_t punteroDeBusqueda; bson_iter_t valorEncontrado; int CantidadBloques = 0; if (documento_X != NULL && bson_iter_init (&punteroDeBusqueda, documento_X) && bson_iter_find_descendant (&punteroDeBusqueda, "cantidadDeBloques", &valorEncontrado) && BSON_ITER_HOLDS_INT32 (&valorEncontrado)) { CantidadBloques = bson_iter_int32 (&valorEncontrado); } return CantidadBloques; }
/*
 * Parse the "type" field of a connect command frame.  Stores the int32 in
 * cmd->data.connect.type and returns TRUE only when it is a valid session
 * type; logs a message and returns FALSE for an invalid type, and returns
 * FALSE when the iterator does not hold an int32.
 */
static gboolean
sim_parser_connect_type (bson_iter_t *piter, const char *key, SimCommand *cmd)
{
  gboolean ok = FALSE;

  g_return_val_if_fail (piter != NULL, FALSE);
  g_return_val_if_fail (cmd != NULL, FALSE);
  g_return_val_if_fail (key != NULL, FALSE);

  if (BSON_ITER_HOLDS_INT32 (piter))
    {
      cmd->data.connect.type = bson_iter_int32 (piter);

      if (sim_parse_check_session_type (cmd->data.connect.type))
        ok = TRUE;
      else
        g_message ("Bad BSON connect message (type)");
    }

  return ok;
}
/*
 * Read an int32 URI option by (case-insensitive) name.  Returns `fallback`
 * when the option is absent, not stored as int32, or stored as 0 (0 is
 * treated as "unset").
 */
int32_t
mongoc_uri_get_option_as_int32 (const mongoc_uri_t *uri, const char *option, int32_t fallback)
{
   const bson_t *opts;
   bson_iter_t it;
   int32_t value;

   opts = mongoc_uri_get_options (uri);
   if (!opts ||
       !bson_iter_init_find_case (&it, opts, option) ||
       !BSON_ITER_HOLDS_INT32 (&it)) {
      return fallback;
   }

   value = bson_iter_int32 (&it);
   return value ? value : fallback;
}
bool _mongoc_populate_cmd_error (const bson_t *doc, int32_t error_api_version, bson_error_t *error) { mongoc_error_domain_t domain = error_api_version >= MONGOC_ERROR_API_VERSION_2 ? MONGOC_ERROR_SERVER : MONGOC_ERROR_QUERY; uint32_t code = MONGOC_ERROR_QUERY_FAILURE; bson_iter_t iter; const char *msg = "Unknown command error"; ENTRY; BSON_ASSERT (doc); if (bson_iter_init_find (&iter, doc, "ok") && bson_iter_as_bool (&iter)) { /* no error */ RETURN (false); } if (bson_iter_init_find (&iter, doc, "code") && BSON_ITER_HOLDS_INT32 (&iter)) { code = (uint32_t) bson_iter_int32 (&iter); } if (code == MONGOC_ERROR_PROTOCOL_ERROR || code == 13390) { code = MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND; } if (bson_iter_init_find (&iter, doc, "errmsg") && BSON_ITER_HOLDS_UTF8 (&iter)) { msg = bson_iter_utf8 (&iter, NULL); } bson_set_error (error, domain, code, "%s", msg); RETURN (true); }
/*
 * Convert a single BSON value (at `iter`) into the corresponding R object.
 * Scalars map to length-1 vectors; int64 is widened to double (R has no
 * native 64-bit integer); OIDs become 24-char hex strings; arrays and
 * documents recurse via ConvertArray/ConvertObject.  Unknown types abort
 * through stop().
 */
SEXP ConvertValue(bson_iter_t* iter){
  if(BSON_ITER_HOLDS_INT32(iter)){
    return ScalarInteger(bson_iter_int32(iter));
  } else if(BSON_ITER_HOLDS_NULL(iter)){
    return R_NilValue;
  } else if(BSON_ITER_HOLDS_BOOL(iter)){
    return ScalarLogical(bson_iter_bool(iter));
  } else if(BSON_ITER_HOLDS_DOUBLE(iter)){
    return ScalarReal(bson_iter_double(iter));
  } else if(BSON_ITER_HOLDS_INT64(iter)){
    /* Widened to double: values beyond 2^53 lose precision. */
    return ScalarReal((double) bson_iter_int64(iter));
  } else if(BSON_ITER_HOLDS_UTF8(iter)){
    return mkStringUTF8(bson_iter_utf8(iter, NULL));
  } else if(BSON_ITER_HOLDS_CODE(iter)){
    return mkStringUTF8(bson_iter_code(iter, NULL));
  } else if(BSON_ITER_HOLDS_BINARY(iter)){
    return ConvertBinary(iter);
  } else if(BSON_ITER_HOLDS_DATE_TIME(iter)){
    return ConvertDate(iter);
  } else if(BSON_ITER_HOLDS_OID(iter)){
    /* 12-byte OID rendered as 24 hex chars + NUL. */
    const bson_oid_t *val = bson_iter_oid(iter);
    char str[25];
    bson_oid_to_string(val, str);
    return mkString(str);
  } else if(BSON_ITER_HOLDS_ARRAY(iter)){
    /* Two independent iterators: presumably one pass to size/inspect and
     * one to fill — confirm against ConvertArray. */
    bson_iter_t child1;
    bson_iter_t child2;
    bson_iter_recurse (iter, &child1);
    bson_iter_recurse (iter, &child2);
    return ConvertArray(&child1, &child2);
  } else if(BSON_ITER_HOLDS_DOCUMENT(iter)){
    bson_iter_t child1;
    bson_iter_t child2;
    bson_iter_recurse (iter, &child1);
    bson_iter_recurse (iter, &child2);
    return ConvertObject(&child1, &child2);
  } else {
    stop("Unimplemented BSON type %d\n", bson_iter_type(iter));
  }
}
static void _bson_to_error (const bson_t *b, bson_error_t *error) { bson_iter_t iter; int code = 0; BSON_ASSERT(b); if (!error) { return; } if (bson_iter_init_find(&iter, b, "code") && BSON_ITER_HOLDS_INT32(&iter)) { code = bson_iter_int32(&iter); } if (bson_iter_init_find(&iter, b, "$err") && BSON_ITER_HOLDS_UTF8(&iter)) { bson_set_error(error, MONGOC_ERROR_QUERY, code, "%s", bson_iter_utf8(&iter, NULL)); return; } if (bson_iter_init_find(&iter, b, "errmsg") && BSON_ITER_HOLDS_UTF8(&iter)) { bson_set_error(error, MONGOC_ERROR_QUERY, code, "%s", bson_iter_utf8(&iter, NULL)); return; } bson_set_error(error, MONGOC_ERROR_QUERY, MONGOC_ERROR_QUERY_FAILURE, "An unknown error ocurred on the server."); }
/*
 * Convert a single BSON value (at `iter`) into the corresponding R object.
 * Scalars map to length-1 vectors; int64 is widened to double (R has no
 * native 64-bit integer); OIDs become 12-byte raw vectors (unlike the
 * hex-string variant elsewhere in this project); arrays and documents
 * recurse via ConvertArray/ConvertObject.  Unknown types abort via stop().
 */
SEXP ConvertValue(bson_iter_t* iter){
  if(BSON_ITER_HOLDS_INT32(iter)){
    return ScalarInteger(bson_iter_int32(iter));
  } else if(BSON_ITER_HOLDS_NULL(iter)){
    return R_NilValue;
  } else if(BSON_ITER_HOLDS_BOOL(iter)){
    return ScalarLogical(bson_iter_bool(iter));
  } else if(BSON_ITER_HOLDS_DOUBLE(iter)){
    return ScalarReal(bson_iter_double(iter));
  } else if(BSON_ITER_HOLDS_INT64(iter)){
    /* Widened to double: values beyond 2^53 lose precision. */
    return ScalarReal((double) bson_iter_int64(iter));
  } else if(BSON_ITER_HOLDS_UTF8(iter)){
    return mkStringUTF8(bson_iter_utf8(iter, NULL));
  } else if(BSON_ITER_HOLDS_CODE(iter)){
    return mkStringUTF8(bson_iter_code(iter, NULL));
  } else if(BSON_ITER_HOLDS_BINARY(iter)){
    return ConvertBinary(iter);
  } else if(BSON_ITER_HOLDS_DATE_TIME(iter)){
    return ConvertDate(iter);
  } else if(BSON_ITER_HOLDS_OID(iter)){
    /* NOTE(review): casts bson_oid_t* straight to bytes; a bson_oid_t is
     * 12 raw bytes so this should hold, but the original author flagged
     * the cast as unverified — confirm against libbson's bson_oid_t. */
    return mkRaw((unsigned char *) bson_iter_oid(iter), 12);
  } else if(BSON_ITER_HOLDS_ARRAY(iter)){
    /* Two independent iterators over the same array: presumably one pass
     * to size/inspect and one to fill — confirm against ConvertArray. */
    bson_iter_t child1;
    bson_iter_t child2;
    bson_iter_recurse (iter, &child1);
    bson_iter_recurse (iter, &child2);
    return ConvertArray(&child1, &child2);
  } else if(BSON_ITER_HOLDS_DOCUMENT(iter)){
    bson_iter_t child1;
    bson_iter_t child2;
    bson_iter_recurse (iter, &child1);
    bson_iter_recurse (iter, &child2);
    return ConvertObject(&child1, &child2);
  } else {
    stop("Unimplemented BSON type %d\n", bson_iter_type(iter));
  }
}
/*
 * Open a TCP connection to `host`, honoring the URI's "connecttimeoutms"
 * option (default MONGOC_DEFAULT_CONNECTTIMEOUTMS; 0 also falls back to the
 * default).  Resolves the host with getaddrinfo and tries each candidate
 * address in order until one connects.  Returns a socket stream, or NULL
 * with *error set on resolution/connect failure.
 */
static mongoc_stream_t *
mongoc_client_connect_tcp (const mongoc_uri_t *uri, const mongoc_host_list_t *host, bson_error_t *error)
{
   mongoc_socket_t *sock = NULL;
   struct addrinfo hints;
   struct addrinfo *result, *rp;
   int32_t connecttimeoutms = MONGOC_DEFAULT_CONNECTTIMEOUTMS;
   int64_t expire_at;
   const bson_t *options;
   bson_iter_t iter;
   char portstr [8];
   int s;

   ENTRY;

   bson_return_val_if_fail (uri, NULL);
   bson_return_val_if_fail (host, NULL);

   if ((options = mongoc_uri_get_options (uri)) &&
       bson_iter_init_find (&iter, options, "connecttimeoutms") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      /* A stored 0 means "unset": fall back to the default. */
      if (!(connecttimeoutms = bson_iter_int32(&iter))) {
         connecttimeoutms = MONGOC_DEFAULT_CONNECTTIMEOUTMS;
      }
   }

   BSON_ASSERT (connecttimeoutms);
   /* Single absolute deadline shared by all connect attempts (usec). */
   expire_at = bson_get_monotonic_time () + (connecttimeoutms * 1000L);

   bson_snprintf (portstr, sizeof portstr, "%hu", host->port);

   memset (&hints, 0, sizeof hints);
   hints.ai_family = host->family;
   hints.ai_socktype = SOCK_STREAM;
   hints.ai_flags = 0;
   hints.ai_protocol = 0;

   s = getaddrinfo (host->host, portstr, &hints, &result);

   if (s != 0) {
      mongoc_counter_dns_failure_inc ();
      bson_set_error(error,
                     MONGOC_ERROR_STREAM,
                     MONGOC_ERROR_STREAM_NAME_RESOLUTION,
                     "Failed to resolve %s",
                     host->host);
      RETURN (NULL);
   }

   mongoc_counter_dns_success_inc ();

   for (rp = result; rp; rp = rp->ai_next) {
      /*
       * Create a new non-blocking socket.
       */
      if (!(sock = mongoc_socket_new (rp->ai_family,
                                      rp->ai_socktype,
                                      rp->ai_protocol))) {
         continue;
      }

      /*
       * Try to connect to the peer; on failure, log, destroy the socket
       * and move on to the next resolved address.
       */
      if (0 != mongoc_socket_connect (sock,
                                      rp->ai_addr,
                                      (socklen_t)rp->ai_addrlen,
                                      expire_at)) {
         char errmsg_buf[32];
         const char * errmsg;

         errmsg = bson_strerror_r (mongoc_socket_errno (sock),
                                   errmsg_buf, sizeof errmsg_buf);
         MONGOC_WARNING ("Failed to connect, error: %d, %s\n",
                         mongoc_socket_errno(sock),
                         errmsg);
         mongoc_socket_destroy (sock);
         sock = NULL;
         continue;
      }

      break;
   }

   if (!sock) {
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_CONNECT,
                      "Failed to connect to target host: %s",
                      host->host_and_port);
      freeaddrinfo (result);
      RETURN (NULL);
   }

   freeaddrinfo (result);

   return mongoc_stream_socket_new (sock);
}
/*
 * Execute an insert write command against a pre-2.6 server using the legacy
 * OP_INSERT wire protocol.  Documents are packed into batches bounded by
 * the server's max message size (or sent one-by-one when bulk op inserts
 * are disallowed).  Oversized documents are reported as per-document write
 * errors; with ordered flags the first such error stops the run.  When the
 * write concern requires it, a getlasterror round trip follows each batch
 * and its result is merged into `result`.
 */
static void
_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     const mongoc_write_concern_t *write_concern,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   uint32_t current_offset;
   mongoc_iovec_t *iov;
   const uint8_t *data;
   mongoc_rpc_t rpc;
   bson_iter_t iter;
   uint32_t len;
   bson_t *gle = NULL;
   uint32_t size = 0;
   bool has_more;
   char ns [MONGOC_NAMESPACE_MAX + 1];
   bool r;
   uint32_t n_docs_in_batch;
   uint32_t idx = 0;              /* index of the current document overall */
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   bool singly;                   /* true: one document per OP_INSERT */

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);

   current_offset = offset;

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);

   singly = !command->u.insert.allow_bulk_op_insert;

   r = bson_iter_init (&iter, command->documents);

   if (!r) {
      /* command->documents is driver-built; iteration cannot fail. */
      BSON_ASSERT (false);
      EXIT;
   }

   if (!command->n_documents || !bson_iter_next (&iter)) {
      bson_set_error (error,
                      MONGOC_ERROR_COLLECTION,
                      MONGOC_ERROR_COLLECTION_INSERT_FAILED,
                      "Cannot do an empty insert.");
      result->failed = true;
      EXIT;
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   /* Worst case: every document lands in the same batch. */
   iov = (mongoc_iovec_t *)bson_malloc ((sizeof *iov) * command->n_documents);

again:
   has_more = false;
   n_docs_in_batch = 0;
   /* Fixed overhead of the OP_INSERT message before any documents. */
   size = (uint32_t)(sizeof (mongoc_rpc_header_t) +
                     4 +
                     strlen (database) +
                     1 +
                     strlen (collection) +
                     1);

   do {
      BSON_ASSERT (BSON_ITER_HOLDS_DOCUMENT (&iter));
      BSON_ASSERT (n_docs_in_batch <= idx);
      BSON_ASSERT (idx < command->n_documents);

      bson_iter_document (&iter, &len, &data);

      BSON_ASSERT (data);
      BSON_ASSERT (len >= 5);

      if (len > max_bson_obj_size) {
         /* document is too large */
         bson_t write_err_doc = BSON_INITIALIZER;

         too_large_error (error, idx, len,
                          max_bson_obj_size, &write_err_doc);

         _mongoc_write_result_merge_legacy (
            result, command, &write_err_doc,
            MONGOC_ERROR_COLLECTION_INSERT_FAILED, offset + idx);

         bson_destroy (&write_err_doc);

         if (command->flags.ordered) {
            /* send the batch so far (if any) and return the error */
            break;
         }
      } else if ((n_docs_in_batch == 1 && singly) || size > (max_msg_size - len)) {
         /* batch is full, send it and then start the next batch */
         has_more = true;
         break;
      } else {
         /* add document to batch and continue building the batch */
         iov[n_docs_in_batch].iov_base = (void *) data;
         iov[n_docs_in_batch].iov_len = len;
         size += len;
         n_docs_in_batch++;
      }

      idx++;
   } while (bson_iter_next (&iter));

   if (n_docs_in_batch) {
      /* Assemble and send one OP_INSERT carrying the batch. */
      rpc.insert.msg_len = 0;
      rpc.insert.request_id = 0;
      rpc.insert.response_to = 0;
      rpc.insert.opcode = MONGOC_OPCODE_INSERT;
      rpc.insert.flags = (
         (command->flags.ordered) ? MONGOC_INSERT_NONE
                          : MONGOC_INSERT_CONTINUE_ON_ERROR);
      rpc.insert.collection = ns;
      rpc.insert.documents = iov;
      rpc.insert.n_documents = n_docs_in_batch;

      if (!mongoc_cluster_sendv_to_server (&client->cluster,
                                           &rpc, 1, server_stream,
                                           write_concern, error)) {
         result->failed = true;
         GOTO (cleanup);
      }

      if (_mongoc_write_concern_needs_gle (write_concern)) {
         bool err = false;
         bson_iter_t citer;

         if (!_mongoc_client_recv_gle (client, server_stream, &gle,
                                       error)) {
            result->failed = true;
            GOTO (cleanup);
         }

         err = (bson_iter_init_find (&citer, gle, "err")
                && bson_iter_as_bool (&citer));

         /*
          * Overwrite the "n" field since it will be zero. Otherwise, our
          * merge_legacy code will not know how many we tried in this batch.
          */
         if (!err &&
             bson_iter_init_find (&citer, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&citer) &&
             !bson_iter_int32 (&citer)) {
            bson_iter_overwrite_int32 (&citer, n_docs_in_batch);
         }
      }
   }

cleanup:

   if (gle) {
      /* Merge this batch's getlasterror result, then advance the offset
       * so the next batch's errors index correctly. */
      _mongoc_write_result_merge_legacy (
         result, command, gle,
         MONGOC_ERROR_COLLECTION_INSERT_FAILED, current_offset);

      current_offset = offset + idx;
      bson_destroy (gle);
      gle = NULL;
   }

   if (has_more) {
      GOTO (again);
   }

   bson_free (iov);

   EXIT;
}
/*
 * Exercise mongoc_write_concern_t end to end: default values, the fsync /
 * journal / w / wtimeout / wmajority accessors, and both generated BSON
 * representations (GLE command and plain document).  Deprecation warnings
 * are suppressed because the fsync accessors are deprecated.
 */
static void
test_write_concern_basic (void)
{
   mongoc_write_concern_t *write_concern;
   const bson_t *gle;
   const bson_t *bson;
   bson_iter_t iter;

   write_concern = mongoc_write_concern_new();

   BEGIN_IGNORE_DEPRECATIONS;

   /*
    * Test defaults.
    */
   ASSERT(write_concern);
   ASSERT(!mongoc_write_concern_get_fsync(write_concern));
   ASSERT(!mongoc_write_concern_get_journal(write_concern));
   ASSERT(mongoc_write_concern_get_w(write_concern) == MONGOC_WRITE_CONCERN_W_DEFAULT);
   ASSERT(!mongoc_write_concern_get_wtimeout(write_concern));
   ASSERT(!mongoc_write_concern_get_wmajority(write_concern));

   /* fsync and journal flags must round-trip through their setters. */
   mongoc_write_concern_set_fsync(write_concern, true);
   ASSERT(mongoc_write_concern_get_fsync(write_concern));
   mongoc_write_concern_set_fsync(write_concern, false);
   ASSERT(!mongoc_write_concern_get_fsync(write_concern));
   mongoc_write_concern_set_journal(write_concern, true);
   ASSERT(mongoc_write_concern_get_journal(write_concern));
   mongoc_write_concern_set_journal(write_concern, false);
   ASSERT(!mongoc_write_concern_get_journal(write_concern));

   /*
    * Test changes to w.
    */
   mongoc_write_concern_set_w(write_concern, MONGOC_WRITE_CONCERN_W_MAJORITY);
   ASSERT(mongoc_write_concern_get_wmajority(write_concern));
   mongoc_write_concern_set_w(write_concern, MONGOC_WRITE_CONCERN_W_DEFAULT);
   ASSERT(!mongoc_write_concern_get_wmajority(write_concern));
   /* set_wmajority(1000) implies both wmajority and a 1000ms wtimeout. */
   mongoc_write_concern_set_wmajority(write_concern, 1000);
   ASSERT(mongoc_write_concern_get_wmajority(write_concern));
   ASSERT(mongoc_write_concern_get_wtimeout(write_concern) == 1000);
   mongoc_write_concern_set_wtimeout(write_concern, 0);
   ASSERT(!mongoc_write_concern_get_wtimeout(write_concern));
   mongoc_write_concern_set_w(write_concern, MONGOC_WRITE_CONCERN_W_DEFAULT);
   ASSERT(mongoc_write_concern_get_w(write_concern) == MONGOC_WRITE_CONCERN_W_DEFAULT);
   mongoc_write_concern_set_w(write_concern, 3);
   ASSERT(mongoc_write_concern_get_w(write_concern) == 3);

   /*
    * Check generated bson.
    */
   mongoc_write_concern_set_fsync(write_concern, true);
   mongoc_write_concern_set_journal(write_concern, true);

   /* GLE command: getlasterror:1 plus fsync/j booleans and w int32. */
   gle = _mongoc_write_concern_get_gle(write_concern);
   ASSERT(bson_iter_init_find(&iter, gle, "getlasterror") && BSON_ITER_HOLDS_INT32(&iter) && bson_iter_int32(&iter) == 1);
   ASSERT(bson_iter_init_find(&iter, gle, "fsync") && BSON_ITER_HOLDS_BOOL(&iter) && bson_iter_bool(&iter));
   ASSERT(bson_iter_init_find(&iter, gle, "j") && BSON_ITER_HOLDS_BOOL(&iter) && bson_iter_bool(&iter));
   ASSERT(bson_iter_init_find(&iter, gle, "w") && BSON_ITER_HOLDS_INT32(&iter) && bson_iter_int32(&iter) == 3);
   ASSERT(gle);

   /* Plain document: same fields minus getlasterror. */
   bson = _mongoc_write_concern_get_bson(write_concern);
   ASSERT(!bson_iter_init_find(&iter, bson, "getlasterror"));
   ASSERT(bson_iter_init_find(&iter, bson, "fsync") && BSON_ITER_HOLDS_BOOL(&iter) && bson_iter_bool(&iter));
   ASSERT(bson_iter_init_find(&iter, bson, "j") && BSON_ITER_HOLDS_BOOL(&iter) && bson_iter_bool(&iter));
   ASSERT(bson_iter_init_find(&iter, bson, "w") && BSON_ITER_HOLDS_INT32(&iter) && bson_iter_int32(&iter) == 3);
   ASSERT(bson);

   mongoc_write_concern_destroy(write_concern);

   END_IGNORE_DEPRECATIONS;
}
/*
 * _mongoc_write_command_update_legacy --
 *
 *    Send an update write command to a pre-2.6 server using the legacy
 *    OP_UPDATE wire-protocol message, one OP_UPDATE per update document
 *    in command->documents.
 *
 *    Two passes over command->documents:
 *      pass 1: validate every "u" (update) document up front, so that a
 *              malformed batch fails before anything is sent;
 *      pass 2: build and send one OP_UPDATE per entry, optionally follow
 *              each with a getlasterror round-trip, and merge the GLE
 *              reply into *result.
 *
 *    Parameters:
 *       command       - the batched update command (documents array).
 *       client        - client owning the cluster/stream.
 *       server_stream - stream to the selected server.
 *       database      - database name (first half of the namespace).
 *       collection    - collection name (second half of the namespace).
 *       write_concern - decides whether a GLE round-trip is needed.
 *       offset        - index of this batch within the caller's larger
 *                       operation; used when merging per-document errors.
 *       result        - accumulated write result (OUT, merged into).
 *       error         - set on failure (OUT).
 *
 *    Exits early (EXIT macro) with result->failed = true and *error set
 *    on validation, send, or GLE-receive failure.
 */
static void
_mongoc_write_command_update_legacy (mongoc_write_command_t *command,
                                     mongoc_client_t *client,
                                     mongoc_server_stream_t *server_stream,
                                     const char *database,
                                     const char *collection,
                                     const mongoc_write_concern_t *write_concern,
                                     uint32_t offset,
                                     mongoc_write_result_t *result,
                                     bson_error_t *error)
{
   mongoc_rpc_t rpc;
   bson_iter_t iter, subiter, subsubiter;
   bson_t doc;
   bool has_update, has_selector, is_upsert;
   bson_t update, selector;
   bson_t *gle = NULL;
   const uint8_t *data = NULL;
   uint32_t len = 0;
   size_t err_offset;
   bool val = false;
   char ns [MONGOC_NAMESPACE_MAX + 1];
   int32_t affected = 0;
   /* Validation flags for replacement-style update documents: reject
    * $-prefixed and dotted keys, which a replacement must not contain. */
   int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL
               | BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   /* Pass 1: every entry must carry a "u" subdocument.  If its first key
    * does not start with '$' it is a replacement document and must pass
    * the strict key validation above. */
   bson_iter_init (&iter, command->documents);
   while (bson_iter_next (&iter)) {
      if (bson_iter_recurse (&iter, &subiter) &&
          bson_iter_find (&subiter, "u") &&
          BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
         bson_iter_document (&subiter, &len, &data);
         bson_init_static (&doc, data, len);

         if (bson_iter_init (&subsubiter, &doc) &&
             bson_iter_next (&subsubiter) &&
             (bson_iter_key (&subsubiter) [0] != '$') &&
             !bson_validate (&doc, (bson_validate_flags_t)vflags, &err_offset)) {
            result->failed = true;
            bson_set_error (error,
                            MONGOC_ERROR_BSON,
                            MONGOC_ERROR_BSON_INVALID,
                            "update document is corrupt or contains "
                            "invalid keys including $ or .");
            EXIT;
         }
      } else {
         result->failed = true;
         bson_set_error (error,
                         MONGOC_ERROR_BSON,
                         MONGOC_ERROR_BSON_INVALID,
                         "updates is malformed.");
         EXIT;
      }
   }

   bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);

   /* Pass 2: one OP_UPDATE message per batch entry. */
   bson_iter_init (&iter, command->documents);
   while (bson_iter_next (&iter)) {
      rpc.update.msg_len = 0;
      rpc.update.request_id = 0;
      rpc.update.response_to = 0;
      rpc.update.opcode = MONGOC_OPCODE_UPDATE;
      rpc.update.zero = 0;
      rpc.update.collection = ns;
      rpc.update.flags = MONGOC_UPDATE_NONE;

      has_update = false;
      has_selector = false;
      is_upsert = false;

      /* Translate the entry's fields into the RPC.  "u" and "q" are kept
       * both as raw RPC pointers and as static bson_t views (update /
       * selector) for the upsert _id fix-up below; the views alias data
       * inside command->documents and must not outlive this iteration. */
      bson_iter_recurse (&iter, &subiter);
      while (bson_iter_next (&subiter)) {
         if (strcmp (bson_iter_key (&subiter), "u") == 0) {
            bson_iter_document (&subiter, &len, &data);
            rpc.update.update = data;
            bson_init_static (&update, data, len);
            has_update = true;
         } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
            bson_iter_document (&subiter, &len, &data);
            rpc.update.selector = data;
            bson_init_static (&selector, data, len);
            has_selector = true;
         } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t)(
                  rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
            }
         } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
            val = bson_iter_bool (&subiter);
            if (val) {
               rpc.update.flags = (mongoc_update_flags_t)(
                  rpc.update.flags | MONGOC_UPDATE_UPSERT);
            }
            is_upsert = true;
         }
      }

      if (!mongoc_cluster_sendv_to_server (&client->cluster,
                                           &rpc, 1, server_stream,
                                           write_concern, error)) {
         result->failed = true;
         EXIT;
      }

      /* For acknowledged write concerns, follow up with getlasterror and
       * merge the reply into *result. */
      if (_mongoc_write_concern_needs_gle (write_concern)) {
         if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
            result->failed = true;
            EXIT;
         }

         if (bson_iter_init_find (&subiter, gle, "n") &&
             BSON_ITER_HOLDS_INT32 (&subiter)) {
            affected = bson_iter_int32 (&subiter);
         }

         /*
          * CDRIVER-372:
          *
          * Versions of MongoDB before 2.6 don't return the _id for an
          * upsert if _id is not an ObjectId.
          */
         /* Synthesize the missing "upserted" _id from the update document
          * (preferred) or the selector, when the server reported an upsert
          * (affected but updatedExisting == false). */
         if (is_upsert &&
             affected &&
             !bson_iter_init_find (&subiter, gle, "upserted") &&
             bson_iter_init_find (&subiter, gle, "updatedExisting") &&
             BSON_ITER_HOLDS_BOOL (&subiter) &&
             !bson_iter_bool (&subiter)) {
            if (has_update && bson_iter_init_find (&subiter, &update, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            } else if (has_selector &&
                       bson_iter_init_find (&subiter, &selector, "_id")) {
               _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
            }
         }

         _mongoc_write_result_merge_legacy (
            result, command, gle,
            MONGOC_ERROR_COLLECTION_UPDATE_FAILED, offset);

         offset++;

         bson_destroy (gle);
      }
   }

   EXIT;
}
mongoc_collection_t * mongoc_database_create_collection (mongoc_database_t *database, const char *name, const bson_t *options, bson_error_t *error) { mongoc_collection_t *collection = NULL; bson_iter_t iter; bson_t cmd; bool capped = false; bson_return_val_if_fail (database, NULL); bson_return_val_if_fail (name, NULL); if (strchr (name, '$')) { bson_set_error (error, MONGOC_ERROR_NAMESPACE, MONGOC_ERROR_NAMESPACE_INVALID, "The namespace \"%s\" is invalid.", name); return NULL; } if (options) { if (bson_iter_init_find (&iter, options, "capped")) { if (!BSON_ITER_HOLDS_BOOL (&iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The argument \"capped\" must be a boolean."); return NULL; } capped = bson_iter_bool (&iter); } if (bson_iter_init_find (&iter, options, "autoIndexId") && !BSON_ITER_HOLDS_BOOL (&iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The argument \"autoIndexId\" must be a boolean."); return NULL; } if (bson_iter_init_find (&iter, options, "size")) { if (!BSON_ITER_HOLDS_INT32 (&iter) && !BSON_ITER_HOLDS_INT64 (&iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The argument \"size\" must be an integer."); return NULL; } if (!capped) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"size\" parameter requires {\"capped\": true}"); return NULL; } } if (bson_iter_init_find (&iter, options, "max")) { if (!BSON_ITER_HOLDS_INT32 (&iter) && !BSON_ITER_HOLDS_INT64 (&iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The argument \"max\" must be an integer."); return NULL; } if (!capped) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"size\" parameter requires {\"capped\": true}"); return NULL; } } if (bson_iter_init_find (&iter, options, "storage")) { if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) { bson_set_error (error, 
MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"storage\" parameter must be a document"); return NULL; } if (bson_iter_find (&iter, "wiredtiger")) { if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"wiredtiger\" option must take a document argument with a \"configString\" field"); return NULL; } if (bson_iter_find (&iter, "configString")) { if (!BSON_ITER_HOLDS_UTF8 (&iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"configString\" parameter must be a string"); return NULL; } } else { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"wiredtiger\" option must take a document argument with a \"configString\" field"); return NULL; } } } } bson_init (&cmd); BSON_APPEND_UTF8 (&cmd, "create", name); if (options) { if (!bson_iter_init (&iter, options)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The argument \"options\" is corrupt or invalid."); bson_destroy (&cmd); return NULL; } while (bson_iter_next (&iter)) { if (!bson_append_iter (&cmd, bson_iter_key (&iter), -1, &iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Failed to append \"options\" to create command."); bson_destroy (&cmd); return NULL; } } } if (mongoc_database_command_simple (database, &cmd, NULL, NULL, error)) { collection = _mongoc_collection_new (database->client, database->name, name, database->read_prefs, database->write_concern); } bson_destroy (&cmd); return collection; }
/*
 * _mongoc_write_result_merge --
 *
 *    Merge one write-command server reply into the accumulated
 *    mongoc_write_result_t for a batched operation:
 *
 *      - "n" feeds nInserted / nRemoved / nMatched per command type;
 *      - a non-empty "writeErrors" array marks the result failed and is
 *        appended (re-indexed by `offset`) to result->writeErrors;
 *      - for updates, each {"index", "_id"} pair in "upserted" is
 *        recorded (index shifted by `offset`) and counted in nUpserted;
 *      - "nModified" and "writeConcernError" are accumulated;
 *      - error labels in the reply are copied into result->errorLabels.
 *
 *    `offset` is the position of this batch within the caller's larger
 *    operation, used to convert server-relative indexes into
 *    operation-relative ones.
 */
void
_mongoc_write_result_merge (mongoc_write_result_t *result,   /* IN */
                            mongoc_write_command_t *command, /* IN */
                            const bson_t *reply,             /* IN */
                            uint32_t offset)
{
   int32_t server_index = 0;
   const bson_value_t *value;
   bson_iter_t iter;
   bson_iter_t citer;
   bson_iter_t ar;
   int32_t n_upserted = 0;
   int32_t affected = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      affected = bson_iter_int32 (&iter);
   }

   /* Any element inside "writeErrors" means at least one document in the
    * batch failed. */
   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter) &&
       bson_iter_recurse (&iter, &citer) &&
       bson_iter_next (&citer)) {
      result->failed = true;
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      result->nInserted += affected;
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += affected;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:
      /* server returns each upserted _id with its index into this batch
       * look for "upserted": [{"index": 4, "_id": ObjectId()}, ...] */
      if (bson_iter_init_find (&iter, reply, "upserted")) {
         if (BSON_ITER_HOLDS_ARRAY (&iter) &&
             (bson_iter_recurse (&iter, &ar))) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "index") &&
                   BSON_ITER_HOLDS_INT32 (&citer)) {
                  server_index = bson_iter_int32 (&citer);

                  /* Re-recurse to find "_id": it may precede "index" in
                   * the subdocument. */
                  if (bson_iter_recurse (&ar, &citer) &&
                      bson_iter_find (&citer, "_id")) {
                     value = bson_iter_value (&citer);
                     _mongoc_write_result_append_upsert (
                        result, offset + server_index, value);
                     n_upserted++;
                  }
               }
            }
         }
         result->nUpserted += n_upserted;
         /*
          * XXX: The following addition to nMatched needs some checking.
          *      I'm highly skeptical of it.
          */
         result->nMatched += BSON_MAX (0, (affected - n_upserted));
      } else {
         result->nMatched += affected;
      }
      if (bson_iter_init_find (&iter, reply, "nModified") &&
          BSON_ITER_HOLDS_INT32 (&iter)) {
         result->nModified += bson_iter_int32 (&iter);
      }
      break;
   default:
      BSON_ASSERT (false);
      break;
   }

   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      _mongoc_write_result_merge_arrays (
         offset, result, &result->writeErrors, &iter);
   }

   if (bson_iter_init_find (&iter, reply, "writeConcernError") &&
       BSON_ITER_HOLDS_DOCUMENT (&iter)) {
      uint32_t len;
      const uint8_t *data;
      bson_t write_concern_error;
      char str[16];
      const char *key;

      /* writeConcernError is a subdocument in the server response
       * append it to the result->writeConcernErrors array */
      bson_iter_document (&iter, &len, &data);
      bson_init_static (&write_concern_error, data, len);

      bson_uint32_to_string (
         result->n_writeConcernErrors, &key, str, sizeof str);

      if (!bson_append_document (
             &result->writeConcernErrors, key, -1, &write_concern_error)) {
         MONGOC_ERROR ("Error adding \"%s\" to writeConcernErrors.\n", key);
      }

      result->n_writeConcernErrors++;
   }

   /* inefficient if there are ever large numbers: for each label in each err,
    * we linear-search result->errorLabels to see if it's included yet */
   _mongoc_bson_array_copy_labels_to (reply, &result->errorLabels);

   EXIT;
}
static void test_find_and_modify (void) { mongoc_collection_t *collection; mongoc_client_t *client; bson_error_t error; bson_iter_t iter; bson_iter_t citer; bson_t *update; bson_t doc = BSON_INITIALIZER; bson_t reply; mongoc_find_and_modify_opts_t *opts; client = test_framework_client_new (); ASSERT (client); collection = get_test_collection (client, "test_find_and_modify"); ASSERT (collection); BSON_APPEND_INT32 (&doc, "superduper", 77889); ASSERT_OR_PRINT (mongoc_collection_insert ( collection, MONGOC_INSERT_NONE, &doc, NULL, &error), error); update = BCON_NEW ("$set", "{", "superduper", BCON_INT32 (1234), "}"); opts = mongoc_find_and_modify_opts_new (); mongoc_find_and_modify_opts_set_update (opts, update); mongoc_find_and_modify_opts_set_fields (opts, tmp_bson ("{'superduper': 1}")); mongoc_find_and_modify_opts_set_sort (opts, tmp_bson ("{'superduper': 1}")); mongoc_find_and_modify_opts_set_flags (opts, MONGOC_FIND_AND_MODIFY_RETURN_NEW); ASSERT_OR_PRINT (mongoc_collection_find_and_modify_with_opts ( collection, &doc, opts, &reply, &error), error); assert (bson_iter_init_find (&iter, &reply, "value")); assert (BSON_ITER_HOLDS_DOCUMENT (&iter)); assert (bson_iter_recurse (&iter, &citer)); assert (bson_iter_find (&citer, "superduper")); assert (BSON_ITER_HOLDS_INT32 (&citer)); assert (bson_iter_int32 (&citer) == 1234); assert (bson_iter_init_find (&iter, &reply, "lastErrorObject")); assert (BSON_ITER_HOLDS_DOCUMENT (&iter)); assert (bson_iter_recurse (&iter, &citer)); assert (bson_iter_find (&citer, "updatedExisting")); assert (BSON_ITER_HOLDS_BOOL (&citer)); assert (bson_iter_bool (&citer)); bson_destroy (&reply); bson_destroy (update); ASSERT_OR_PRINT (mongoc_collection_drop (collection, &error), error); mongoc_find_and_modify_opts_destroy (opts); mongoc_collection_destroy (collection); mongoc_client_destroy (client); bson_destroy (&doc); }
/**
 * _mongoc_gridfs_file_new_from_bson:
 *
 * creates a gridfs file from a bson object
 *
 * This is only really useful for instantiating a gridfs file from a server
 * side object
 *
 * @gridfs: the owning gridfs (borrowed, not copied).
 * @data:   a files-collection document; it is copied into file->bson, and
 *          the string/document fields below point into that copy, so they
 *          remain valid for the lifetime of the returned file.
 *
 * Returns: a newly allocated mongoc_gridfs_file_t on success, or NULL if
 * any recognized field has an unexpected BSON type (with nothing leaked).
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs,
                                   const bson_t *data)
{
   mongoc_gridfs_file_t *file;
   const bson_value_t *value;
   const char *key;
   bson_iter_t iter;
   const uint8_t *buf;
   uint32_t buf_len;

   ENTRY;

   BSON_ASSERT (gridfs);
   BSON_ASSERT (data);

   file = (mongoc_gridfs_file_t *)bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   bson_copy_to (data, &file->bson);

   /* Walk the document once, picking out each known metadata field and
    * type-checking it; numeric fields accept int32/int64/double. */
   bson_iter_init (&iter, &file->bson);

   while (bson_iter_next (&iter)) {
      key = bson_iter_key (&iter);

      if (0 == strcmp (key, "_id")) {
         value = bson_iter_value (&iter);
         bson_value_copy (value, &file->files_id);
      } else if (0 == strcmp (key, "length")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         file->length = bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "chunkSize")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         /* chunk_size is int32_t; reject values that would truncate. */
         if (bson_iter_as_int64 (&iter) > INT32_MAX) {
            GOTO (failure);
         }
         file->chunk_size = (int32_t)bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "uploadDate")) {
         if (!BSON_ITER_HOLDS_DATE_TIME (&iter)){
            GOTO (failure);
         }
         file->upload_date = bson_iter_date_time (&iter);
      } else if (0 == strcmp (key, "md5")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_md5 = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "filename")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_filename = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "contentType")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_content_type = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "aliases")) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter)) {
            GOTO (failure);
         }
         /* Static views into file->bson; no separate ownership. */
         bson_iter_array (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_aliases, buf, buf_len);
      } else if (0 == strcmp (key, "metadata")) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            GOTO (failure);
         }
         bson_iter_document (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_metadata, buf, buf_len);
      }
   }

   /* TODO: is there are a minimal object we should be verifying that we
    *       actually have here? */

   RETURN (file);

failure:
   bson_destroy (&file->bson);
   /* Fix: also release the struct itself; previously only the inner bson
    * was destroyed, leaking the bson_malloc0'd file on every bad input. */
   bson_free (file);

   RETURN (NULL);
}