/* Exercise batch insert against a unique index, with and without
 * MONGO_CONTINUE_ON_ERROR.  Without the flag the batch stops at the first
 * duplicate key; with it, the remaining valid documents are still inserted. */
static void test_batch_insert_with_continue( mongo *conn ) {
    bson *first_batch[5];
    bson *second_batch[5];
    int idx;

    mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL );
    mongo_create_simple_index( conn, TEST_NS, "n", MONGO_INDEX_UNIQUE, NULL );

    /* First batch: unique values n = 0..4. */
    for( idx = 0; idx < 5; idx++ ) {
        first_batch[idx] = bson_alloc();
        bson_init( first_batch[idx] );
        bson_append_int( first_batch[idx], "n", idx );
        bson_finish( first_batch[idx] );
    }

    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)first_batch,
        5, NULL, 0 ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_shared_empty( ) ) == 5 );

    /* Second batch: element 0 duplicates n == 1; elements 1..4 carry the
     * fresh values n = 6..9. */
    second_batch[0] = bson_alloc();
    bson_init( second_batch[0] );
    bson_append_int( second_batch[0], "n", 1 );
    bson_finish( second_batch[0] );

    for( idx = 1; idx < 5; idx++ ) {
        second_batch[idx] = bson_alloc();
        bson_init( second_batch[idx] );
        bson_append_int( second_batch[idx], "n", idx + 5 );
        bson_finish( second_batch[idx] );
    }

    /* Without continue on error, will fail immediately. */
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)second_batch,
        5, NULL, 0 ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_shared_empty( ) ) == 5 );

    /* With continue on error, will insert four documents. */
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)second_batch,
        5, NULL, MONGO_CONTINUE_ON_ERROR ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_shared_empty( ) ) == 9 );

    /* Release every document from both batches. */
    for( idx = 0; idx < 5; idx++ ) {
        bson_destroy( second_batch[idx] );
        bson_dealloc( second_batch[idx] );
        bson_destroy( first_batch[idx] );
        bson_dealloc( first_batch[idx] );
    }
}
int test_bson_empty( void ) { const bson *empty1; empty1 = bson_shared_empty(); ASSERT( empty1->data ); ASSERT( bson_size(empty1) > 0 ); ALLOW_AND_REQUIRE_MALLOC_BEGIN; bson * empty2 = bson_alloc(); ALLOW_AND_REQUIRE_MALLOC_END; memset( empty2, 0, sizeof( bson) ); bson_init_empty( empty2 ); ASSERT( empty2->data ); ASSERT( bson_size( empty2 ) > 0 ); bson_destroy( empty2 ); ALLOW_AND_REQUIRE_FREE_BEGIN; bson_dealloc( empty2 ); ALLOW_AND_REQUIRE_FREE_END; return 0; }
/* We can test write concern for update
 * and remove by doing operations on a capped collection. */
static void test_update_and_remove( mongo *conn ) {
    mongo_write_concern wc[1];
    bson *docs[5];
    bson query[1], update[1];
    int i;

    create_capped_collection( conn );

    /* Seed the capped collection with n = 0..4. */
    for( i = 0; i < 5; i++ ) {
        docs[i] = bson_alloc();
        bson_init( docs[i] );
        bson_append_int( docs[i], "n", i );
        bson_finish( docs[i] );
    }
    ASSERT( mongo_insert_batch( conn, "test.wc", (const bson **)docs,
        5, NULL, 0 ) == MONGO_OK );
    ASSERT( mongo_count( conn, "test", "wc", bson_shared_empty( ) ) == 5 );

    /* Query matching the document with n == 2. */
    bson_init( query );
    bson_append_int( query, "n", 2 );
    bson_finish( query );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ),
        NULL ) == MONGO_OK );

    /* Update that would grow the document — illegal in a capped ns. */
    bson_init( update );
    bson_append_start_object( update, "$set" );
    bson_append_string( update, "n", "a big long string" );
    bson_append_finish_object( update );
    bson_finish( update );

    /* Update will appear to succeed with no write concern specified,
     * but doesn't. */
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ),
        NULL ) == MONGO_OK );
    ASSERT( mongo_update( conn, "test.wc", query, update, 0, NULL ) == MONGO_OK );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ),
        NULL ) == MONGO_OK );

    /* Remove will appear to succeed with no write concern specified,
     * but doesn't. */
    ASSERT( mongo_remove( conn, "test.wc", query, NULL ) == MONGO_OK );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ),
        NULL ) == MONGO_OK );

    /* With w:1 the server's refusal is surfaced as MONGO_WRITE_ERROR. */
    mongo_write_concern_init( wc );
    mongo_write_concern_set_w( wc, 1 );
    mongo_write_concern_finish( wc );

    mongo_clear_errors( conn );
    ASSERT( mongo_update( conn, "test.wc", query, update, 0, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr,
        "failing update: objects in a capped ns cannot grow" );

    mongo_clear_errors( conn );
    ASSERT( mongo_remove( conn, "test.wc", query, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr,
        "can't remove from a capped collection" );

    mongo_write_concern_destroy( wc );
    bson_destroy( query );
    bson_destroy( update );
    for( i = 0; i < 5; i++ ) {
        bson_destroy( docs[i] );
        bson_dealloc( docs[i] );
    }
}
/* Write callback: serialize one value list to BSON and insert it into the
 * "collectd.<plugin>" collection, (re)connecting and authenticating first
 * if needed.  Returns 0 on success or after an insert error (best-effort),
 * ENOMEM if the BSON record could not be built, and -1 on connect/auth
 * failure.
 *
 * FIX: the original leaked bson_record on the connect-failure and
 * auth-failure return paths; both now release it before returning.  The
 * auth error message was also missing the ':' between host and port. */
static int wm_write (const data_set_t *ds, /* {{{ */
    const value_list_t *vl,
    user_data_t *ud)
{
  wm_node_t *node = ud->data;
  char collection_name[512];
  bson *bson_record;
  int status;

  ssnprintf (collection_name, sizeof (collection_name), "collectd.%s",
      vl->plugin);

  bson_record = wm_create_bson (ds, vl, node->store_rates);
  if (bson_record == NULL)
    return (ENOMEM);

  pthread_mutex_lock (&node->lock);

  if (!mongo_is_connected (node->conn))
  {
    INFO ("write_mongodb plugin: Connecting to [%s]:%i",
        (node->host != NULL) ? node->host : "localhost",
        (node->port != 0) ? node->port : MONGO_DEFAULT_PORT);
    status = mongo_connect (node->conn, node->host, node->port);
    if (status != MONGO_OK)
    {
      ERROR ("write_mongodb plugin: Connecting to [%s]:%i failed.",
          (node->host != NULL) ? node->host : "localhost",
          (node->port != 0) ? node->port : MONGO_DEFAULT_PORT);
      mongo_destroy (node->conn);
      pthread_mutex_unlock (&node->lock);
      /* Was leaked here before: release the record built above. */
      bson_destroy (bson_record);
      bson_dealloc (bson_record);
      return (-1);
    }

    if ((node->db != NULL) && (node->user != NULL) && (node->passwd != NULL))
    {
      status = mongo_cmd_authenticate (node->conn,
          node->db, node->user, node->passwd);
      if (status != MONGO_OK)
      {
        ERROR ("write_mongodb plugin: Authenticating to [%s]:%i for database "
            "\"%s\" as user \"%s\" failed.",
            (node->host != NULL) ? node->host : "localhost",
            (node->port != 0) ? node->port : MONGO_DEFAULT_PORT,
            node->db, node->user);
        mongo_destroy (node->conn);
        pthread_mutex_unlock (&node->lock);
        /* Was leaked here before: release the record built above. */
        bson_destroy (bson_record);
        bson_dealloc (bson_record);
        return (-1);
      }
    }

    if (node->timeout > 0)
    {
      status = mongo_set_op_timeout (node->conn, node->timeout);
      if (status != MONGO_OK)
      {
        WARNING ("write_mongodb plugin: mongo_set_op_timeout(%i) failed: %s",
            node->timeout, node->conn->errstr);
      }
    }
  }

  /* Assert if the connection has been established */
  assert (mongo_is_connected (node->conn));

#if MONGO_MINOR >= 6
  /* There was an API change in 0.6.0 as linked below */
  /* https://github.com/mongodb/mongo-c-driver/blob/master/HISTORY.md */
  status = mongo_insert (node->conn, collection_name, bson_record, NULL);
#else
  status = mongo_insert (node->conn, collection_name, bson_record);
#endif

  if (status != MONGO_OK)
  {
    ERROR ( "write_mongodb plugin: error inserting record: %d", node->conn->err);
    if (node->conn->err != MONGO_BSON_INVALID)
      ERROR ("write_mongodb plugin: %s", node->conn->errstr);
    else
      ERROR ("write_mongodb plugin: Invalid BSON structure, error = %#x",
          (unsigned int) bson_record->err);

    /* Disconnect except on data errors. */
    if ((node->conn->err != MONGO_BSON_INVALID)
        && (node->conn->err != MONGO_BSON_NOT_FINISHED))
      mongo_destroy (node->conn);
  }

  pthread_mutex_unlock (&node->lock);

  /* free our resource as not to leak memory */
  bson_destroy (bson_record); /* matches bson_init() */
  bson_dealloc (bson_record); /* matches bson_alloc() */

  return (0);
} /* }}} int wm_write */