/* Unit test for bson_append_null(): verifies the serialized size and exact
 * bytes of a BSON NULL element, then checks that appending fails with a
 * NULL key, a NULL BSON object, or an already-finished object. */
void
test_bson_null (void)
{
  bson *b;

  b = bson_new ();
  ok (bson_append_null (b, "null"), "bson_append_null() works");
  bson_finish (b);

  /* 11 bytes: int32 length + type byte (0x0A) + "null\0" key + closing NUL. */
  cmp_ok (bson_size (b), "==", 11, "BSON NULL element size check");
  ok (memcmp (bson_data (b),
              "\013\000\000\000\012\156\165\154\154\000\000",
              bson_size (b)) == 0,
      "BSON NULL element contents check");
  bson_free (b);

  /* Negative cases: invalid arguments must not modify the document. */
  b = bson_new ();
  ok (bson_append_null (b, NULL) == FALSE,
      "bson_append_null() should fail without a key name");
  ok (bson_append_null (NULL, "null") == FALSE,
      "bson_append_null() should fail without a BSON object");
  bson_finish (b);
  /* An empty, finished BSON document is exactly 5 bytes. */
  cmp_ok (bson_size (b), "==", 5, "BSON object should be empty");
  ok (bson_append_null (b, "null") == FALSE,
      "Appending to a finished element should fail");
  bson_free (b);
}
/* Look up the newest GridFS file matching QUERY and initialize GFILE from it.
 * Sorting descending on "uploadDate" makes the most recent revision win.
 * Returns MONGO_OK on success, MONGO_ERROR when nothing matches. */
int gridfs_find_query( gridfs *gfs, bson *query, gridfile *gfile ) {
    bson sort_by_date;
    bson full_query;
    bson result;
    int found;

    /* Order-by spec: newest upload first. */
    bson_init( &sort_by_date );
    bson_append_int( &sort_by_date, "uploadDate", -1 );
    bson_finish( &sort_by_date );

    /* Wrap the caller's query together with the order-by clause. */
    bson_init( &full_query );
    bson_append_bson( &full_query, "query", query );
    bson_append_bson( &full_query, "orderby", &sort_by_date );
    bson_finish( &full_query );

    found = ( mongo_find_one( gfs->client, gfs->files_ns, &full_query,
                              NULL, &result ) == MONGO_OK );

    bson_destroy( &sort_by_date );
    bson_destroy( &full_query );

    if ( !found )
        return MONGO_ERROR;

    gridfile_init( gfs, &result, gfile );
    bson_destroy( &result );
    return MONGO_OK;
}
int update_filesize(struct inode * e, off_t newsize) { bson cond, doc; mongo * conn = get_conn(); int res; if(newsize < e->size) return 0; e->size = newsize; bson_init(&cond); bson_append_oid(&cond, "_id", &e->oid); bson_finish(&cond); bson_init(&doc); bson_append_start_object(&doc, "$set"); bson_append_long(&doc, "size", newsize); bson_append_finish_object(&doc); bson_finish(&doc); res = mongo_update(conn, inodes_name, &cond, &doc, 0, NULL); bson_destroy(&cond); bson_destroy(&doc); if(res != 0) return -EIO; return 0; }
/* Insert 1000 documents derived from a common base document into
 * "lmc.tutorial". Each iteration clones the base, appends a "counter"
 * field, inserts the clone, and frees it. Exits the process on failure. */
static void
do_inserts (mongo_sync_connection *conn)
{
  bson *base;
  gint i;

  base = bson_build (BSON_TYPE_STRING, "tutorial-program", "tut_hl_client.c", -1,
                     BSON_TYPE_INT32, "the answer to life, the universe and everything", 42,
                     BSON_TYPE_NONE);
  bson_finish (base);

  for (i = 0; i < 1000; i++)
    {
      bson *n;

      /* Copy bson_size(base) - 1 bytes: dropping the closing NUL leaves the
         new document unfinished, so "counter" can still be appended. */
      n = bson_new_from_data (bson_data (base), bson_size (base) - 1);
      bson_append_int32 (n, "counter", i);
      bson_finish (n);

      if (!mongo_sync_cmd_insert (conn, "lmc.tutorial", n, NULL))
        {
          fprintf (stderr, "Error inserting document %d: %s\n", i,
                   strerror (errno));
          exit (1);
        }
      bson_free (n);
    }
  bson_free (base);
}
// Insert DOCUMENT into the collection and return the 24-character hex OID
// string of the stored record.
//
// If the document carries an "_id" field, it must be a valid OID string
// (otherwise ejdb_exception(JBEINVALIDBSONPK) is thrown); the value is then
// normalized by merging a freshly built {_id: oid} document with the
// converted body. Failures from ejdbsavebson are rethrown via
// throw_last_ejdb_exception().
std::string collection::insert(nlohmann::json::object_t const& document) throw(std::runtime_error) {
    bson_oid_t oid;
    std::shared_ptr<bson> bson_doc = convert_to_bson(document);
    if(document.find("_id") != document.end()) {
        // NOTE(review): assumes the "_id" json value converts to std::string;
        // the const reference extends the temporary's lifetime.
        std::string const &id = document.find("_id")->second;
        if(!ejdbisvalidoidstr(id.c_str())) {
            throw ejdb_exception(JBEINVALIDBSONPK);
        }
        bson_oid_from_string(&oid, id.c_str());
        // Wrap the user-supplied _id in its own single-field document.
        std::shared_ptr<bson> bson_oid(bson_create(), bson_del);
        bson_init(bson_oid.get());
        bson_append_oid(bson_oid.get(), "_id", &oid);
        bson_finish(bson_oid.get());
        // Merge the _id document with the converted body into a third
        // document. (NOTE(review): confirm bson_merge's 'true' flag semantics
        // — presumably overwrite-on-conflict.)
        std::shared_ptr<bson> bson_doc_with_oid(bson_create(), bson_del);
        bson_init(bson_doc_with_oid.get());
        bson_merge(bson_oid.get(), bson_doc.get(), true, bson_doc_with_oid.get());
        bson_finish(bson_doc_with_oid.get());
        bson_doc.swap(bson_doc_with_oid);
    }
    if(!ejdbsavebson(_coll.get(), bson_doc.get(), &oid)) {
        throw_last_ejdb_exception();
    }
    // ejdbsavebson fills 'oid'; render it as a 24-character hex string.
    std::string id(24, '\0');
    bson_oid_to_string(&oid, &id[0]);
    document_added(id, document);
    return id;
}
/* Unit test for bson_append_timestamp(): verifies the serialized size and
 * exact bytes of a BSON timestamp element, then exercises the failure
 * paths (NULL key, NULL object, finished object). */
void
test_bson_timestamp (void)
{
  bson *b;
  gint64 l = 9876543210;

  b = bson_new ();
  ok (bson_append_timestamp (b, "ts", l), "bson_append_timestamp() works");
  bson_finish (b);

  /* 17 bytes: int32 length + type (0x11) + "ts\0" + 8-byte value + NUL. */
  cmp_ok (bson_size (b), "==", 17, "BSON timestamp element size check");
  ok (memcmp (bson_data (b),
              "\021\000\000\000\021\164\163\000\352\026\260\114\002\000\000"
              "\000\000",
              bson_size (b)) == 0,
      "BSON timestamp element contents check");
  bson_free (b);

  /* Negative cases: invalid arguments must leave the document empty. */
  b = bson_new ();
  ok (bson_append_timestamp (b, NULL, l) == FALSE,
      "bson_append_timestamp() with a NULL key should fail");
  ok (bson_append_timestamp (NULL, "ts", l) == FALSE,
      "bson_append_timestamp() without a BSON object should fail");
  bson_finish (b);
  cmp_ok (bson_size (b), "==", 5, "BSON object should be empty");
  ok (bson_append_timestamp (b, "ts", l) == FALSE,
      "Appending to a finished element should fail");
  bson_free (b);
}
int inode_exists(const char * path) { bson query, fields; mongo * conn = get_conn(); mongo_cursor curs; int res; bson_init(&query); bson_append_string(&query, "dirents", path); bson_finish(&query); bson_init(&fields); bson_append_int(&fields, "dirents", 1); bson_append_int(&fields, "_id", 0); bson_finish(&fields); mongo_cursor_init(&curs, conn, inodes_name); mongo_cursor_set_query(&curs, &query); mongo_cursor_set_fields(&curs, &fields); mongo_cursor_set_limit(&curs, 1); res = mongo_cursor_next(&curs); bson_destroy(&query); bson_destroy(&fields); mongo_cursor_destroy(&curs); if(res == 0) return 0; if(curs.err != MONGO_CURSOR_EXHAUSTED) return -EIO; return -ENOENT; }
/* Unit test for bson_append_utc_datetime(): verifies the serialized size
 * and exact bytes of a UTC datetime element (ms since the epoch), then
 * exercises the failure paths (NULL key, NULL object, finished object). */
void
test_bson_utc_datetime (void)
{
  bson *b;

  b = bson_new ();
  ok (bson_append_utc_datetime (b, "date", 1294860709000),
      "bson_append_utc_datetime() works");
  bson_finish (b);

  /* 19 bytes: int32 length + type (0x09) + "date\0" + 8-byte value + NUL. */
  cmp_ok (bson_size (b), "==", 19, "BSON UTC datetime element size check");
  ok (memcmp (bson_data (b),
              "\023\000\000\000\011\144\141\164\145\000\210\154\266\173\055"
              "\001\000\000\000",
              bson_size (b)) == 0,
      "BSON UTC datetime element contents check");
  bson_free (b);

  /* Negative cases: invalid arguments must leave the document empty. */
  b = bson_new ();
  ok (bson_append_utc_datetime (b, NULL, 1294860709000) == FALSE,
      "bson_append_utc_datetime() with a NULL key should fail");
  ok (bson_append_utc_datetime (NULL, "date", 1294860709000) == FALSE,
      "bson_append_utc_datetime() without a BSON object should fail");
  bson_finish (b);
  cmp_ok (bson_size (b), "==", 5, "BSON object should be empty");
  ok (bson_append_utc_datetime (b, "date", 1294860709000) == FALSE,
      "Appending to a finished element should fail");
  bson_free (b);
}
/* Set, replace, or clear the disqualification comment for USER_ID.
 *
 * Cases, in order:
 *   - both old and new comment absent: nothing to do;
 *   - old present, new absent: clear locally and $unset in the database;
 *   - old equals new: nothing to do;
 *   - otherwise: store the new comment, updating the existing record when
 *     it has a UUID, or inserting a fresh one when it does not.
 * Returns 0 on success / no-op, -1 when the user entry cannot be loaded. */
static int
set_disq_comment_func(
        struct xuser_cnts_state *data,
        int user_id,
        const unsigned char *disq_comment)
{
    struct xuser_mongo_cnts_state *state = (struct xuser_mongo_cnts_state *) data;
    struct team_extra *extra = do_get_entry(state, user_id);
    if (!extra) return -1;

    /* Both absent: no-op. */
    if (!extra->disq_comment && !disq_comment) {
        return 0;
    }

    /* Clearing an existing comment: $unset it in the stored record. */
    if (extra->disq_comment && !disq_comment) {
        ASSERT(ej_uuid_is_nonempty(extra->uuid));
        xfree(extra->disq_comment);
        extra->disq_comment = NULL;
        bson *doc = bson_new();
        /* Value is irrelevant for $unset; an empty string is used. */
        bson_append_string(doc, "disq_comment", "", 0);
        bson_finish(doc);
        return do_update(state, extra, "$unset", doc);
    }

    /* Unchanged comment: no-op. */
    if (extra->disq_comment && !strcmp(extra->disq_comment, disq_comment))
        return 0;

    xfree(extra->disq_comment);
    extra->disq_comment = xstrdup(disq_comment);

    /* A non-empty UUID means the record already exists in the database. */
    if (ej_uuid_is_nonempty(extra->uuid)) {
        bson *doc = bson_new();
        bson_append_string(doc, "disq_comment", extra->disq_comment,
                           strlen(extra->disq_comment));
        bson_finish(doc);
        return do_update(state, extra, NULL, doc);
    } else {
        return do_insert(state, extra);
    }
}
/* Create a unique + sparse index on test.bar's "foo" field, then look the
 * index definition up in test.system.indexes and verify that both the
 * "unique" and "sparse" flags were recorded. */
void test_index_helper( mongo *conn ) {

    int ret;
    bson b, out;
    bson_iterator it;

    /* Index key spec: {foo: -1} (descending). */
    bson_init( &b );
    bson_append_int( &b, "foo", -1 );
    bson_finish( &b );

    mongo_create_index( conn, "test.bar", &b, NULL,
                        MONGO_INDEX_SPARSE | MONGO_INDEX_UNIQUE, &out );

    bson_destroy( &b );
    /* NOTE(review): 'out' is destroyed unconditionally — confirm
     * mongo_create_index initializes it on every path. */
    bson_destroy( &out );

    /* Query system.indexes for the index we just created: {key: {foo: -1}}. */
    bson_init( &b );
    bson_append_start_object( &b, "key" );
    bson_append_int( &b, "foo", -1 );
    bson_append_finish_object( &b );
    bson_finish( &b );

    ret = mongo_find_one( conn, "test.system.indexes", &b, NULL, &out );
    ASSERT( ret == MONGO_OK );
    bson_print( &out );

    bson_iterator_init( &it, &out );
    ASSERT( bson_find( &it, &out, "unique" ) );
    ASSERT( bson_find( &it, &out, "sparse" ) );

    bson_destroy( &b );
    bson_destroy( &out );
}
/* Functional test for mongo_sync_conn_set_max_insert_size(): with a small
 * limit, multi-document inserts are split into chunks that fit; with a
 * limit smaller than any single document, both cmd_insert_n() and
 * cmd_insert() must fail with EMSGSIZE. */
void
test_func_mongo_sync_max_insert_size (void)
{
  mongo_sync_connection *conn;
  const bson *docs[10];
  bson *b1, *b2, *b3;

  b1 = bson_new ();
  bson_append_string (b1, "func_mongo_sync_max_insert_size", "works", -1);
  bson_finish (b1);
  b2 = bson_new ();
  bson_append_int32 (b2, "int32", 1984);
  bson_finish (b2);
  /* b3 is deliberately empty (5 bytes). */
  b3 = bson_new ();
  bson_finish (b3);

  conn = mongo_sync_connect (config.primary_host, config.primary_port,
                             FALSE);

  /*
   * cmd_insert_n()
   */
  /* Limit big enough for b1+b3 but not all three at once: forces chunking,
     which must still succeed. */
  mongo_sync_conn_set_max_insert_size (conn,
                                       bson_size (b1) + bson_size (b3) + 1);
  docs[0] = b1;
  docs[1] = b2;
  docs[2] = b3;

  ok (mongo_sync_cmd_insert_n (conn, config.ns, 3, docs) == TRUE,
      "mongo_sync_cmd_insert_n() works with a small max_insert_size");

  /* Limit smaller than any document: must fail with EMSGSIZE. */
  mongo_sync_conn_set_max_insert_size (conn, 1);
  errno = 0;
  ok (mongo_sync_cmd_insert_n (conn, config.ns, 3, docs) == FALSE,
      "mongo_sync_cmd_insert_n() should fail if any one document is too big");
  cmp_ok (errno, "==", EMSGSIZE, "errno is set to EMSGSIZE");

  /*
   * cmd_insert()
   */
  mongo_sync_conn_set_max_insert_size (conn,
                                       bson_size (b1) + bson_size (b3) + 1);
  ok (mongo_sync_cmd_insert (conn, config.ns, b1, b2, b3, NULL) == TRUE,
      "mongo_sync_cmd_insert() works with a small max_insert_size");

  mongo_sync_conn_set_max_insert_size (conn, 1);
  errno = 0;
  ok (mongo_sync_cmd_insert (conn, config.ns, b1, b2, b3, NULL) == FALSE,
      "mongo_sync_cmd_insert() should fail if any one document is too big");
  cmp_ok (errno, "==", EMSGSIZE, "errno is set to EMSGSIZE");

  mongo_sync_disconnect (conn);
  bson_free (b1);
  bson_free (b2);
  bson_free (b3);
}
/* Test read timeout by causing the
 * server to sleep for 10s on a query. */
int test_read_timeout( void ) {
    mongo conn[1];
    bson b, obj, out, fields;
    int res;

    CONN_CLIENT_TEST;

    /* $where with a 10s sleep: guarantees the query outlives the 1s
     * socket timeout set below. */
    bson_init( &b );
    bson_append_code( &b, "$where", "sleep( 10 * 1000 );");
    bson_finish( &b );

    bson_init( &obj );
    bson_append_string( &obj, "foo", "bar");
    bson_finish( &obj );

    /* Ensure there is at least one document for the query to scan. */
    res = mongo_insert( conn, "test.foo", &obj, NULL );

    /* Set the connection timeout here. */
    if( mongo_set_op_timeout( conn, 1000 ) != MONGO_OK ) {
        printf("Could not set socket timeout!.");
        exit(1);
    }

    res = mongo_find_one( conn, "test.foo", &b, bson_empty(&fields), &out );

    /* The read must time out and surface as an I/O error. */
    ASSERT( res == MONGO_ERROR );
    ASSERT( conn->err == MONGO_IO_ERROR );
    ASSERT( conn->errcode == WSAETIMEDOUT );

    return 0;
}
/* Network test for mongo_sync_cmd_kill_cursors(): seeds 40 documents,
 * opens a no-timeout cursor and kills it, then severs the socket to verify
 * that killing a second cursor auto-reconnects. */
void
test_mongo_sync_cmd_kill_cursors_net (void)
{
  mongo_packet *p;
  mongo_sync_connection *conn;
  bson *b;
  gint i;
  mongo_reply_packet_header rh;
  gint64 cid;

  begin_network_tests (3);

  conn = mongo_sync_connect (config.primary_host, config.primary_port,
                             TRUE);
  mongo_sync_conn_set_auto_reconnect (conn, TRUE);

  /* Seed 40 documents so the 2-at-a-time query leaves a live cursor. */
  b = bson_new ();
  for (i = 0; i < 40; i++)
    {
      bson_reset (b);
      bson_append_string (b, "test-name", __FILE__, -1);
      bson_append_int32 (b, "seq", i);
      bson_finish (b);
      mongo_sync_cmd_insert (conn, config.ns, b, NULL);
    }
  bson_free (b);

  b = bson_new ();
  bson_append_string (b, "test-name", __FILE__, -1);
  bson_finish (b);

  /* Open a cursor and extract its server-side id from the reply header. */
  p = mongo_sync_cmd_query (conn, config.ns,
                            MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
                            0, 2, b, NULL);
  mongo_wire_reply_packet_get_header (p, &rh);
  cid = rh.cursor_id;
  mongo_wire_packet_free (p);

  ok (mongo_sync_cmd_kill_cursors (conn, 1, cid) == TRUE,
      "mongo_sync_kill_cursors() works");

  /* Second cursor: used to test the auto-reconnect path below. */
  p = mongo_sync_cmd_query (conn, config.ns,
                            MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
                            0, 2, b, NULL);
  bson_free (b);
  mongo_wire_reply_packet_get_header (p, &rh);
  cid = rh.cursor_id;
  mongo_wire_packet_free (p);

  /* Sever the underlying socket to force a reconnect on the next command. */
  shutdown (conn->super.fd, SHUT_RDWR);
  sleep (3);

  ok (mongo_sync_cmd_kill_cursors (conn, 1, cid) == TRUE,
      "mongo_sync_cmd_kill_cursors() automatically reconnects");

  mongo_sync_disconnect (conn);

  test_mongo_sync_cmd_kill_cursors_net_secondary ();

  end_network_tests ();
}
/* Increment the "value" counter stored for KEY in collection DB, creating
 * the record (at zero) first if it does not exist yet.
 * Returns the new counter value, or -1 on bad arguments / lookup failure. */
int mongodb_update_key_stat(bot_t * bot, char *db, char *key, int value)
{
    bson b;
    bson_iterator i;
    mongo_cursor cursor;
    int ret = 0;

    if (!db || !key || value < 0) {
        return -1;
    }

    debug(bot, "mongodb_update_key_stat: Entered\n");

    /* Query document: {key: <key>}. */
    bson_init(&b);
    bson_append_string(&b, "key", key);
    bson_finish(&b);

    mongo_cursor_init(&cursor, &gi->mongo_conn, db);
    mongo_cursor_set_query(&cursor, &b);

    if (mongo_cursor_next(&cursor) != MONGO_OK) {
        /* No record yet: create one with a zero count, then re-query. */
        mongodb_insert_key_stat(bot, db, key, 0);
        /* Release the first cursor before re-initializing it; the original
         * code re-ran mongo_cursor_init over the live cursor and leaked
         * its reply buffer / server-side state. */
        mongo_cursor_destroy(&cursor);
        mongo_cursor_init(&cursor, &gi->mongo_conn, db);
        mongo_cursor_set_query(&cursor, &b);
        if (mongo_cursor_next(&cursor) != MONGO_OK)
            goto cleanup;
    }

    debug(bot, "mongodb_update_key_stat: Found!\n");

    if (bson_find(&i, mongo_cursor_bson(&cursor), "value")) {
        bson c;
        ret = (int)bson_iterator_int(&i);
        ret++;
        /* Replacement document: {key: <key>, value: <ret>}. */
        bson_init(&c);
        bson_append_string(&c, "key", key);
        bson_append_int(&c, "value", ret);
        bson_finish(&c);
        debug(bot, "mongodb_update_key_stat: updating to %i\n", ret);
        mongo_update(&gi->mongo_conn, db, &b, &c, 0);
        bson_destroy(&c);
        bson_destroy(&b);
        mongo_cursor_destroy(&cursor);
        return ret;
    }

    debug(bot, "mongodb_update_key_stat: Key not found\n");

 cleanup:
    bson_destroy(&b);
    mongo_cursor_destroy(&cursor);
    return -1;
}
/* Add (or update, via upsert) a user record in <db>.system.users with the
 * MD5 password digest computed by mongo_pass_digest().
 * Returns the result of the underlying mongo_update() call. */
MONGO_EXPORT int mongo_cmd_add_user( mongo *conn, const char *db,
                                     const char *user, const char *pass ) {
    bson query;
    bson update;
    char hex_digest[33];
    char *ns;
    int result;

    /* Build the "<db>.system.users" namespace string. */
    ns = bson_malloc( strlen( db ) + strlen( ".system.users" ) + 1 );
    strcpy( ns, db );
    strcat( ns, ".system.users" );

    mongo_pass_digest( user, pass, hex_digest );

    /* Selector: {user: <user>}. */
    bson_init( &query );
    bson_append_string( &query, "user", user );
    bson_finish( &query );

    /* Update: {$set: {pwd: <digest>}}. */
    bson_init( &update );
    bson_append_start_object( &update, "$set" );
    bson_append_string( &update, "pwd", hex_digest );
    bson_append_finish_object( &update );
    bson_finish( &update );

    result = mongo_update( conn, ns, &query, &update, MONGO_UPDATE_UPSERT );

    bson_free( ns );
    bson_destroy( &query );
    bson_destroy( &update );
    return result;
}
/* Verify that inserts into a namespace containing '$' are rejected client
 * side, both for single inserts and for batch inserts, with the error code
 * MONGO_NS_INVALID and the expected error message. */
int test_namespace_validation_on_insert( void ) {
    mongo conn[1];
    bson b[1], b2[1];
    bson *objs[2];

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    bson_init( b );
    bson_append_int( b, "foo", 1 );
    bson_finish( b );

    /* Single insert into "tet.fo$o" must fail namespace validation. */
    ASSERT( mongo_insert( conn, "tet.fo$o", b, NULL ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_NS_INVALID );
    /* Compare only the first 29 characters of the 30-character message
     * "Collection may not contain '$'" (the closing quote is excluded). */
    ASSERT( strncmp( conn->errstr, "Collection may not contain '$'", 29 ) == 0 );
    mongo_clear_errors( conn );

    /* Batch insert into the same bad namespace must fail the same way. */
    bson_init( b2 );
    bson_append_int( b2, "foo", 1 );
    bson_finish( b2 );
    objs[0] = b;
    objs[1] = b2;
    ASSERT( mongo_insert_batch( conn, "tet.fo$o", (const bson **)objs, 2,
                                NULL, 0 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_NS_INVALID );
    ASSERT( strncmp( conn->errstr, "Collection may not contain '$'", 29 ) == 0 );

    return 0;
}
/* Unit test for bson_append_document(): builds a document nested two levels
 * deep, verifies its serialized size and exact bytes, then exercises the
 * failure paths (unfinished subdocument, NULL key/document/object,
 * finished target). */
void
test_bson_document (void)
{
  bson *b, *e1, *e2;

  /* Innermost document: {i32: 1984, str: "hello world"}. */
  e1 = bson_new ();
  bson_append_int32 (e1, "i32", 1984);
  bson_append_string (e1, "str", "hello world", -1);
  bson_finish (e1);

  /* Middle document: {foo: "bar", subd: <e1>}. */
  e2 = bson_new ();
  bson_append_string (e2, "foo", "bar", -1);
  ok (bson_append_document (e2, "subd", e1),
      "bson_append_document() works");
  bson_finish (e2);
  bson_free (e1);

  /* Outer document: {doc: <e2>}. */
  b = bson_new ();
  ok (bson_append_document (b, "doc", e2),
      "bson_append_document() works still");
  bson_finish (b);
  bson_free (e2);

  cmp_ok (bson_size (b), "==", 69, "BSON document element size check");
  ok (memcmp (bson_data (b),
              "\105\000\000\000\003\144\157\143\000\073\000\000\000\002\146"
              "\157\157\000\004\000\000\000\142\141\162\000\003\163\165\142"
              "\144\000\043\000\000\000\020\151\063\062\000\300\007\000\000"
              "\002\163\164\162\000\014\000\000\000\150\145\154\154\157\040"
              "\167\157\162\154\144\000\000\000\000",
              bson_size (b)) == 0,
      "BSON document element contents check");
  bson_free (b);

  /* Negative cases. */
  e1 = bson_new ();
  bson_append_int32 (e1, "foo", 42);
  b = bson_new ();
  ok (bson_append_document (b, "doc", e1) == FALSE,
      "bson_append_document() with an unfinished document should fail");
  bson_finish (e1);
  ok (bson_append_document (b, NULL, e1) == FALSE,
      "bson_append_document() with a NULL key should fail");
  ok (bson_append_document (b, "doc", NULL) == FALSE,
      "bson_append_document() with a NULL document should fail");
  ok (bson_append_document (NULL, "doc", e1) == FALSE,
      "bson_append_document() without a BSON object should fail");
  bson_finish (b);
  cmp_ok (bson_size (b), "==", 5, "BSON object should be empty");
  ok (bson_append_document (b, "doc", e1) == FALSE,
      "Appending to a finished element should fail");
  bson_free (e1);
  bson_free (b);
}
int test_insert_limits( const char *set_name ) { char version[10]; mongo conn[1]; mongo_write_concern wc[1]; int i; char key[10]; int res = 0; bson b[1], b2[1]; bson *objs[2]; mongo_write_concern_init( wc ); wc->w = 1; mongo_write_concern_finish( wc ); /* We'll perform the full test if we're running v2.0 or later. */ if( mongo_get_server_version( version ) != -1 && version[0] <= '1' ) return 0; mongo_replset_init( conn, set_name ); mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT + 1 ); mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT ); res = mongo_replset_connect( conn ); if( res != MONGO_OK ) { res = conn->err; return res; } ASSERT( conn->max_bson_size > MONGO_DEFAULT_MAX_BSON_SIZE ); bson_init( b ); for(i=0; i<1200000; i++) { sprintf( key, "%d", i + 10000000 ); bson_append_int( b, key, i ); } bson_finish( b ); ASSERT( bson_size( b ) > conn->max_bson_size ); ASSERT( mongo_insert( conn, "test.foo", b, wc ) == MONGO_ERROR ); ASSERT( conn->err == MONGO_BSON_TOO_LARGE ); mongo_clear_errors( conn ); ASSERT( conn->err == 0 ); bson_init( b2 ); bson_append_int( b2, "foo", 1 ); bson_finish( b2 ); objs[0] = b; objs[1] = b2; ASSERT( mongo_insert_batch( conn, "test.foo", (const bson**)objs, 2, wc, 0 ) == MONGO_ERROR ); ASSERT( conn->err == MONGO_BSON_TOO_LARGE ); mongo_write_concern_destroy( wc ); return 0; }
/* Unit test for mongo_wire_cmd_custom(): failure paths (NULL command,
 * unfinished command, NULL db), then a successful "getnonce" packet whose
 * header and embedded BSON payload are validated field by field. */
void
test_mongo_wire_cmd_custom (void)
{
  bson *cmd;
  mongo_packet *p;
  mongo_packet_header hdr;
  const guint8 *data;
  gint32 data_size;
  bson_cursor *c;
  gint32 pos;

  cmd = bson_new ();
  bson_append_int32 (cmd, "getnonce", 1);

  ok (mongo_wire_cmd_custom (1, "test", 0, NULL) == NULL,
      "mongo_wire_cmd_custom() fails with a NULL command");
  /* 'cmd' is not finished yet at this point. */
  ok (mongo_wire_cmd_custom (1, "test", 0, cmd) == NULL,
      "mongo_wire_cmd_custom() fails with an unfinished command");
  bson_finish (cmd);
  ok (mongo_wire_cmd_custom (1, NULL, 0, cmd) == NULL,
      "mongo_wire_cmd_custom() fails with a NULL db");

  ok ((p = mongo_wire_cmd_custom (1, "test", 0, cmd)) != NULL,
      "mongo_wire_cmd_custom() works");
  bson_free (cmd);

  /* Verify the header */
  mongo_wire_packet_get_header (p, &hdr);
  cmp_ok ((data_size = mongo_wire_packet_get_data (p, &data)), "!=", -1,
          "Packet data size looks fine");
  cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + data_size,
          "Packet header length is OK");
  cmp_ok (hdr.id, "==", 1, "Packet request ID is ok");
  cmp_ok (hdr.resp_to, "==", 0, "Packet reply ID is ok");

  /*
   * Test the created request
   */

  /* pos = zero + collection_name + NULL + skip + ret */
  pos = sizeof (gint32) + strlen ("test.$cmd") + 1 + sizeof (gint32) * 2;

  /* Re-read the embedded document (minus its closing NUL, so it can be
     re-finished) and check the "getnonce" key. */
  ok ((cmd = bson_new_from_data (data + pos,
                                 _DOC_SIZE (data, pos) - 1)) != NULL,
      "Packet contains a BSON document");
  bson_finish (cmd);

  ok ((c = bson_find (cmd, "getnonce")) != NULL,
      "BSON object contains a 'getnonce' key");
  cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT32,
          "'getnonce' key has the correct type");
  ok (bson_cursor_next (c) == FALSE,
      "'getnonce' key is the last in the object");

  bson_cursor_free (c);
  bson_free (cmd);
  mongo_wire_packet_free (p);
}
/* Unit test for bson_append_array(): builds an array nested two levels
 * deep, verifies its serialized size and exact bytes, then exercises the
 * failure paths (unfinished array, NULL name/array/object, finished
 * target). */
void
test_bson_array (void)
{
  bson *b, *e1, *e2;

  /* Inner array: [1984, "hello world"]. */
  e1 = bson_new ();
  bson_append_int32 (e1, "0", 1984);
  bson_append_string (e1, "1", "hello world", -1);
  bson_finish (e1);

  /* Middle array: ["bar", <e1>]. */
  e2 = bson_new ();
  bson_append_string (e2, "0", "bar", -1);
  ok (bson_append_array (e2, "1", e1), "bson_append_array() works");
  bson_finish (e2);
  bson_free (e1);

  /* Outer array: [<e2>]. */
  b = bson_new ();
  ok (bson_append_array (b, "0", e2), "bson_append_array() works still");
  bson_finish (b);
  bson_free (e2);

  cmp_ok (bson_size (b), "==", 58, "BSON array element size check");
  ok (memcmp (bson_data (b),
              "\072\000\000\000\004\060\000\062\000\000\000\002\060\000\004"
              "\000\000\000\142\141\162\000\004\061\000\037\000\000\000\020"
              "\060\000\300\007\000\000\002\061\000\014\000\000\000\150\145"
              "\154\154\157\040\167\157\162\154\144\000\000\000\000",
              bson_size (b)) == 0,
      "BSON array element contents check");
  bson_free (b);

  /* Negative cases. */
  e1 = bson_new ();
  bson_append_int32 (e1, "0", 1984);
  b = bson_new ();
  ok (bson_append_array (b, "array", e1) == FALSE,
      "bson_append_array() with an unfinished array should fail");
  bson_finish (e1);
  ok (bson_append_array (b, NULL, e1) == FALSE,
      "bson_append_array() with a NULL name should fail");
  ok (bson_append_array (b, "foo", NULL) == FALSE,
      "bson_append_array() with a NULL array should fail");
  ok (bson_append_array (NULL, "foo", e1) == FALSE,
      "bson_append_array() with a NULL BSON should fail");
  bson_finish (b);
  cmp_ok (bson_size (b), "==", 5, "BSON object should be empty");
  ok (bson_append_array (b, "array", e1) == FALSE,
      "Appending to a finished element should fail");
  bson_free (e1);
  bson_free (b);
}
/* Exercise MONGO_CONTINUE_ON_ERROR batch-insert semantics against a unique
 * index on "n": without the flag a duplicate key aborts the rest of the
 * batch; with the flag the remaining documents are still inserted. */
void test_batch_insert_with_continue( mongo *conn ) {
    bson *objs[5];
    bson *objs2[5];
    bson empty;
    int i;

    mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL );
    mongo_create_simple_index( conn, TEST_NS, "n", MONGO_INDEX_UNIQUE, NULL );

    /* Seed documents with n = 0..4. */
    for( i=0; i<5; i++ ) {
        objs[i] = bson_malloc( sizeof( bson ) );
        bson_init( objs[i] );
        bson_append_int( objs[i], "n", i );
        bson_finish( objs[i] );
    }

    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 5,
                                NULL, 0 ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL,
                         bson_empty( &empty ) ) == 5 );

    /* Add one duplicate value for n. */
    objs2[0] = bson_malloc( sizeof( bson ) );
    bson_init( objs2[0] );
    bson_append_int( objs2[0], "n", 1 );
    bson_finish( objs2[0] );

    /* Add n for 6 - 9. */
    for( i = 1; i < 5; i++ ) {
        objs2[i] = bson_malloc( sizeof( bson ) );
        bson_init( objs2[i] );
        bson_append_int( objs2[i], "n", i + 5 );
        bson_finish( objs2[i] );
    }

    /* Without continue on error, the server stops at the duplicate, so the
     * count stays at 5. (The call itself returns MONGO_OK because the
     * duplicate-key failure is server-side, not a send failure.) */
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs2, 5,
                                NULL, 0 ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL,
                         bson_empty( &empty ) ) == 5 );

    /* With continue on error, will insert four documents. */
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs2, 5,
                                NULL, MONGO_CONTINUE_ON_ERROR ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL,
                         bson_empty( &empty ) ) == 9 );

    for( i=0; i<5; i++ ) {
        bson_destroy( objs2[i] );
        bson_free( objs2[i] );
        bson_destroy( objs[i] );
        bson_free( objs[i] );
    }
}
/* Unit test for bson_append_string(): implicit length (-1), explicit
 * length (truncating the value), and the failure paths (invalid length,
 * NULL string/key/object, finished object). */
void
test_bson_string (void)
{
  bson *b;

  /* Test #1: A single string element, with default size. */
  b = bson_new ();
  ok (bson_append_string (b, "hello", "world", -1),
      "bson_append_string() works");
  bson_finish (b);

  cmp_ok (bson_size (b), "==", 22, "BSON string element size check");
  ok (memcmp (bson_data (b),
              "\026\000\000\000\002\150\145\154\154\157\000\006\000\000\000"
              "\167\157\162\154\144\000\000",
              bson_size (b)) == 0,
      "BSON string element contents check");
  bson_free (b);

  /* Test #2: A single string element, with explicit length. */
  /* Only the first strlen("cruel world") bytes of the value are stored. */
  b = bson_new ();
  ok (bson_append_string (b, "goodbye",
                          "cruel world, this garbage is gone.",
                          strlen ("cruel world")),
      "bson_append_string() with explicit length works");
  bson_finish (b);

  cmp_ok (bson_size (b), "==", 30, "BSON string element size check, #2");
  ok (memcmp (bson_data (b),
              "\036\000\000\000\002\147\157\157\144\142\171\145\000\014\000"
              "\000\000\143\162\165\145\154\040\167\157\162\154\144\000\000",
              bson_size (b)) == 0,
      "BSON string element contents check, #2");
  bson_free (b);

  /* Test #3: Negative test, passing invalid arguments. */
  b = bson_new ();
  ok (bson_append_string (b, "hello", "world", -42) == FALSE,
      "bson_append_string() should fail with invalid length");
  ok (bson_append_string (b, "hello", NULL, -1) == FALSE,
      "bson_append_string() should fail without a string");
  ok (bson_append_string (b, NULL, "world", -1) == FALSE,
      "bson_append_string() should fail without a key name");
  ok (bson_append_string (NULL, "hello", "world", -1) == FALSE,
      "bson_append_string() should fail without a BSON object");
  bson_finish (b);
  cmp_ok (bson_size (b), "==", 5, "BSON object should be empty");
  ok (bson_append_string (b, "hello", "world", -1) == FALSE,
      "Appending to a finished element should fail");
  bson_free (b);
}
int test_insert_limits( void ) { char version[10]; mongo conn[1]; int i; char key[10]; bson b[1], b2[1]; bson *objs[2]; /* Test the default max BSON size. */ mongo_init( conn ); ASSERT( conn->max_bson_size == MONGO_DEFAULT_MAX_BSON_SIZE ); /* We'll perform the full test if we're running v2.0 or later. */ if( mongo_get_server_version( version ) != -1 && version[0] <= '1' ) return 0; if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) { printf( "failed to connect\n" ); exit( 1 ); } ASSERT( conn->max_bson_size > MONGO_DEFAULT_MAX_BSON_SIZE ); bson_init( b ); for(i=0; i<1200000; i++) { sprintf( key, "%d", i + 10000000 ); bson_append_int( b, key, i ); } bson_finish( b ); ASSERT( bson_size( b ) > conn->max_bson_size ); ASSERT( mongo_insert( conn, "test.foo", b, NULL ) == MONGO_ERROR ); ASSERT( conn->err == MONGO_BSON_TOO_LARGE ); mongo_clear_errors( conn ); ASSERT( conn->err == 0 ); bson_init( b2 ); bson_append_int( b2, "foo", 1 ); bson_finish( b2 ); objs[0] = b; objs[1] = b2; ASSERT( mongo_insert_batch( conn, "test.foo", (const bson **)objs, 2, NULL, 0 ) == MONGO_ERROR ); ASSERT( conn->err == MONGO_BSON_TOO_LARGE ); return 0; }
/* Prepare a query on DB: parse QUERYS (mandatory) and SELS (optional) into
 * BSON documents, honoring MMG_FLAG_GETOID (whether "_id" is returned),
 * and record paging/callback parameters for the subsequent execution.
 *
 * qcbk may be NULL; when already inside a callback, a non-NULL qcbk is
 * rejected so a nested mmg_prepare cannot clobber the outer callback.
 * Returns STATUS_OK, or an NERR_ASSERT error on bad input or parse failure. */
NEOERR* mmg_prepare(mmg_conn *db, int flags, int skip, int limit,
                    NEOERR* (*qcbk)(mmg_conn *db, HDF *node, bool lastone),
                    char *sels, char *querys)
{
    /* Fix: error message typo — was "paramter null". */
    if (!db || !querys) return nerr_raise(NERR_ASSERT, "parameter null");

    mtc_noise("prepare %s %s", querys, sels);

    /*
     * doc query
     */
    if (db->docq) {
        bson_free(db->docq);
        db->docq = NULL;
    }
    db->docq = mbson_new_from_string(querys, true);
    if (!db->docq)
        return nerr_raise(NERR_ASSERT, "build query: %s: %s",
                          querys, strerror(errno));

    /*
     * doc selector
     */
    if (db->docs) {
        bson_free(db->docs);
        db->docs = NULL;
    }
    if (sels) {
        /* 'false' leaves the document open so "_id" can still be appended. */
        db->docs = mbson_new_from_string(sels, false);
        if (!db->docs)
            return nerr_raise(NERR_ASSERT, "build selector: %s: %s",
                              sels, strerror(errno));
        if (!(flags & MMG_FLAG_GETOID))
            bson_append_int32(db->docs, "_id", 0);
        bson_finish(db->docs);
    } else if (!(flags & MMG_FLAG_GETOID)) {
        /* No selector given: still suppress "_id" unless requested. */
        db->docs = bson_build(BSON_TYPE_INT32, "_id", 0, BSON_TYPE_NONE);
        bson_finish(db->docs);
    }

    /*
     * later mmg_prepare won't overwrite formal's callback
     */
    if (db->incallback && qcbk != NULL)
        return nerr_raise(NERR_ASSERT, "already in callback, can't set callback");
    if (!db->incallback)
        db->query_callback = qcbk;

    db->flags = flags;
    db->skip = skip;
    db->limit = limit;
    db->rescount = 0;

    return STATUS_OK;
}
/* Insert a {key, value, comment} record into collection DB, unless a record
 * with the same key already exists. The optional printf-style FMT builds
 * the comment field; otherwise the literal string "NULL" is stored.
 * Returns 0 on success, -1 on bad arguments or duplicate key. */
int mongodb_insert_key(bot_t * bot, char *db, char *key, char *value,
                       char *fmt, ...)
{
    bson b;
    mongo_cursor cursor;
    va_list ap;
    char buf[1024], *buf_ptr = "NULL";

    if (!db || !key || !value) {
        return -1;
    }

    debug(bot, "mongodb_insert_key: Entered\n");

    /* Format the optional comment. */
    if (fmt) {
        bz(buf);
        va_start(ap, fmt);
        vsnprintf_buf(buf, fmt, ap);
        va_end(ap);
        buf_ptr = buf;
    }

    /* First, check whether the key is already present. */
    bson_init(&b);
    bson_append_string(&b, "key", key);
    bson_finish(&b);

    mongo_cursor_init(&cursor, &gi->mongo_conn, db);
    mongo_cursor_set_query(&cursor, &b);

    if (mongo_cursor_next(&cursor) == MONGO_OK) {
        debug(bot, "mongodb_insert_key: Key already exist\n");
        bson_destroy(&b);
        mongo_cursor_destroy(&cursor);
        return -1;
    }

    /* Fix: release the query document before rebuilding it as the insert
     * document — the original re-ran bson_init over the finished object
     * and leaked its buffer. */
    bson_destroy(&b);

    bson_init(&b);
    bson_append_string(&b, "key", key);
    bson_append_string(&b, "value", value);
    bson_append_string(&b, "comment", buf_ptr);
    bson_finish(&b);

    mongo_insert(&gi->mongo_conn, db, &b);

    bson_destroy(&b);
    mongo_cursor_destroy(&cursor);
    return 0;
}
/* Unit test for bson_check_duplicate_keys() / bson_fix_duplicate_keys():
 * a document with two "array" keys ([1] and [3,4]) must be detected as
 * having duplicates, and fixing must merge them into a single re-indexed
 * array [1, 3, 4]. */
void testCheckDuplicates2(void) {
    bson b, b2;
    bson_iterator it, sit;
    bson_type bt;

    /* Build {array: [1], array: [3, 4]} — deliberately duplicated key. */
    bson_init(&b);
    bson_append_start_array(&b, "array");
    bson_append_int(&b, "0", 1);
    bson_append_finish_array(&b);
    bson_append_start_array(&b, "array");
    bson_append_int(&b, "0", 3);
    bson_append_int(&b, "1", 4);
    bson_append_finish_array(&b);
    bson_finish(&b);
    CU_ASSERT_FALSE_FATAL(b.err);
    CU_ASSERT_TRUE_FATAL(bson_check_duplicate_keys(&b));

    /* Fixing must merge the duplicates into one array. */
    bson_init(&b2);
    bson_fix_duplicate_keys(&b, &b2);
    bson_finish(&b2);
    CU_ASSERT_FALSE_FATAL(b2.err);
    CU_ASSERT_FALSE_FATAL(bson_check_duplicate_keys(&b2));

    /* The fixed document has exactly one "array" key... */
    BSON_ITERATOR_INIT(&it, &b2);
    bt = bson_iterator_next(&it);
    CU_ASSERT_EQUAL_FATAL(bt, BSON_ARRAY);
    CU_ASSERT_STRING_EQUAL_FATAL(BSON_ITERATOR_KEY(&it), "array");

    /* ...whose elements are re-indexed "0","1","2" = 1, 3, 4. */
    BSON_ITERATOR_SUBITERATOR(&it, &sit);
    bt = bson_iterator_next(&sit);
    CU_ASSERT_STRING_EQUAL_FATAL(BSON_ITERATOR_KEY(&sit), "0");
    CU_ASSERT_TRUE_FATAL(BSON_IS_NUM_TYPE(bt));
    CU_ASSERT_EQUAL_FATAL(bson_iterator_int(&sit), 1);
    bt = bson_iterator_next(&sit);
    CU_ASSERT_STRING_EQUAL_FATAL(BSON_ITERATOR_KEY(&sit), "1");
    CU_ASSERT_TRUE_FATAL(BSON_IS_NUM_TYPE(bt));
    CU_ASSERT_EQUAL_FATAL(bson_iterator_int(&sit), 3);
    bt = bson_iterator_next(&sit);
    CU_ASSERT_STRING_EQUAL_FATAL(BSON_ITERATOR_KEY(&sit), "2");
    CU_ASSERT_TRUE_FATAL(BSON_IS_NUM_TYPE(bt));
    CU_ASSERT_EQUAL_FATAL(bson_iterator_int(&sit), 4);
    bt = bson_iterator_next(&sit);
    CU_ASSERT_EQUAL_FATAL(bt, BSON_EOO);

    bson_destroy(&b2);
    bson_destroy(&b);
}
void tut_sync_insert (void) { mongo_sync_connection *conn; bson *doc1, *doc2, *doc3; conn = mongo_sync_connect ("localhost", 27017, FALSE); if (!conn) { perror ("mongo_sync_connect()"); exit (1); } doc1 = bson_build (BSON_TYPE_STRING, "hello", "world", -1, BSON_TYPE_INT32, "the_final_answer", 42, BSON_TYPE_BOOLEAN, "yes?", FALSE, BSON_TYPE_INT32, "n", 1, BSON_TYPE_NONE); bson_finish (doc1); if (!mongo_sync_cmd_insert (conn, "tutorial.docs", doc1, NULL)) { perror ("mongo_sync_cmd_insert()"); exit (1); } doc2 = bson_build (BSON_TYPE_INT32, "n", 2, BSON_TYPE_BOOLEAN, "yes?", FALSE, BSON_TYPE_STRING, "hello", "dolly", -1, BSON_TYPE_NONE); bson_finish (doc2); doc3 = bson_build (BSON_TYPE_INT32, "n", 3, BSON_TYPE_STRING, "hello", "nurse", -1, BSON_TYPE_BOOLEAN, "yes?", TRUE, BSON_TYPE_NONE); bson_finish (doc3); if (!mongo_sync_cmd_insert (conn, "tutorial.docs", doc2, doc3, NULL)) { perror ("mongo_sync_cmd_insert()"); exit (1); } bson_free (doc3); bson_free (doc2); bson_free (doc1); mongo_sync_disconnect (conn); }
/* Unit test for bson_append_binary(): subtype 0 (generic) and subtype 2
 * (old-style, with an embedded length prefix), exact-byte verification,
 * then the failure paths (NULL key/data, negative length, NULL object,
 * finished object). */
void
test_bson_binary (void)
{
  bson *b;

  b = bson_new ();
  ok (bson_append_binary (b, "binary0", BSON_BINARY_SUBTYPE_GENERIC,
                          (guint8 *)"foo\0bar", 7),
      "bson_append_binary(), type 0 works");
  /* Subtype 2 data carries its own 4-byte length prefix (here, 7). */
  ok (bson_append_binary (b, "binary2", BSON_BINARY_SUBTYPE_BINARY,
                          (guint8 *)"\0\0\0\7foo\0bar", 11),
      "bson_append_binary(), type 2 works");
  bson_finish (b);

  cmp_ok (bson_size (b), "==", 51, "BSON binary element size check");
  ok (memcmp (bson_data (b),
              "\063\000\000\000\005\142\151\156\141\162\171\060\000\007\000"
              "\000\000\000\146\157\157\000\142\141\162\005\142\151\156\141"
              "\162\171\062\000\013\000\000\000\002\000\000\000\007\146\157"
              "\157\000\142\141\162\000",
              bson_size (b)) == 0,
      "BSON binary element contents check");
  bson_free (b);

  /* Negative cases: invalid arguments must leave the document empty. */
  b = bson_new ();
  ok (bson_append_binary (b, NULL, BSON_BINARY_SUBTYPE_GENERIC,
                          (guint8 *)"foo\0bar", 7) == FALSE,
      "bson_append_binary() without a key name should fail");
  ok (bson_append_binary (b, "binary1", BSON_BINARY_SUBTYPE_GENERIC,
                          NULL, 10) == FALSE,
      "bson_append_binary () without binary data should fail");
  ok (bson_append_binary (b, "binary3", BSON_BINARY_SUBTYPE_GENERIC,
                          (guint8 *)"foo\0bar", -1) == FALSE,
      "bson_append_binary () with an invalid length should fail");
  ok (bson_append_binary (NULL, "binary1", BSON_BINARY_SUBTYPE_GENERIC,
                          (guint8 *)"foo\0bar", 7) == FALSE,
      "bson_append_binary () without a BSON object should fail");
  bson_finish (b);
  cmp_ok (bson_size (b), "==", 5, "BSON object should be empty");
  ok (bson_append_binary (b, "binary", BSON_BINARY_SUBTYPE_GENERIC,
                          (guint8 *)"foo\0bar", 7) == FALSE,
      "Appending to a finished element should fail");
  bson_free (b);
}
// Run the server-side "count" command for this cursor's query and store the
// result in retVal. When applySkipLimit is true, the cursor's current skip
// and limit values are passed through to the command.
// Returns 0 on success, or the (negative) error from run_command.
result_t MongoCursor::count(bool applySkipLimit, int32_t &retVal)
{
    bson bbq;

    // Command document: {count: <collection>, query: ..., [limit, skip]}.
    bson_init(&bbq);
    bson_append_string(&bbq, "count", m_name.c_str());

    Isolate* isolate = Isolate::now();
    // When the stored query is a "special" wrapper ({query: ..., orderby: ...}),
    // unwrap its inner "query" member; otherwise use the object directly.
    if (m_bSpecial)
        encodeValue(&bbq, "query",
                    v8::Local<v8::Object>::New(isolate->m_isolate, m_query)->Get(
                        v8::String::NewFromUtf8(isolate->m_isolate, "query")));
    else
        encodeValue(&bbq, "query",
                    v8::Local<v8::Object>::New(isolate->m_isolate, m_query));

    if (applySkipLimit) {
        if (m_cursor->limit)
            bson_append_int(&bbq, "limit", m_cursor->limit);
        if (m_cursor->skip)
            bson_append_int(&bbq, "skip", m_cursor->skip);
    }

    bson_finish(&bbq);

    // NOTE(review): bbq is never bson_destroy()ed on any path — confirm
    // whether run_command takes ownership of the document; if not, this
    // leaks on both the success and error paths.
    v8::Local<v8::Object> res;
    result_t hr = m_cursor->m_db->run_command(&bbq, res);
    if (hr < 0)
        return hr;

    // Extract the "n" field of the command reply.
    retVal = res->Get(v8::String::NewFromUtf8(isolate->m_isolate, "n"))->Int32Value();

    return 0;
}
/* Tutorial: batch-insert four person documents ({_id, name, age}) into
 * "tutorial.persons" with a single mongo_insert_batch() call, then free
 * every document and the pointer array. */
static void tutorial_insert_batch( mongo *conn ) {
    bson *p;
    bson **ps;
    char *names[4];
    int ages[] = { 29, 24, 24, 32 };
    int i, n = 4;

    names[0] = "Eliot";
    names[1] = "Mike";
    names[2] = "Mathias";
    names[3] = "Richard";

    ps = ( bson ** )malloc( sizeof( bson * ) * n);

    for ( i = 0; i < n; i++ ) {
        p = ( bson * )malloc( sizeof( bson ) );
        bson_init( p );
        bson_append_new_oid( p, "_id" );
        bson_append_string( p, "name", names[i] );
        bson_append_int( p, "age", ages[i] );
        bson_finish( p );
        ps[i] = p;
    }

    mongo_insert_batch( conn, "tutorial.persons", (const bson **) ps, n, 0, 0 );

    for ( i = 0; i < n; i++ ) {
        bson_destroy( ps[i] );
        free( ps[i] );
    }
    /* Fix: the pointer array itself was leaked in the original. */
    free( ps );
}