void
test_bson_build_full (void)
{
  bson *b, *o;

  b = bson_build_full (BSON_TYPE_DOUBLE, "double", FALSE, 3.14,
                       BSON_TYPE_STRING, "str", FALSE, "hello world", -1,
                       BSON_TYPE_DOCUMENT, "doc", TRUE,
                       bson_build (BSON_TYPE_STRING, "name", "sub-document", -1,
                                   BSON_TYPE_INT32, "answer", 42,
                                   BSON_TYPE_NONE),
                       BSON_TYPE_ARRAY, "array", TRUE,
                       bson_build (BSON_TYPE_INT32, "0", 32,
                                   BSON_TYPE_INT64, "1", (gint64)-42,
                                   BSON_TYPE_NONE),
                       BSON_TYPE_BINARY, "binary0", FALSE,
                       BSON_BINARY_SUBTYPE_GENERIC, "foo\0bar", 7,
                       BSON_TYPE_OID, "_id", FALSE, "1234567890ab",
                       BSON_TYPE_BOOLEAN, "TRUE", FALSE, FALSE,
                       BSON_TYPE_UTC_DATETIME, "date", FALSE, 1294860709000,
                       BSON_TYPE_TIMESTAMP, "ts", FALSE, 1294860709000,
                       BSON_TYPE_NULL, "null", FALSE,
                       BSON_TYPE_REGEXP, "foobar", FALSE, "s/foo.*bar/", "i",
                       BSON_TYPE_JS_CODE, "alert", FALSE,
                       "alert (\"hello world!\");", -1,
                       BSON_TYPE_SYMBOL, "sex", FALSE, "Marilyn Monroe", -1,
                       BSON_TYPE_JS_CODE_W_SCOPE, "print", TRUE,
                       "alert (v);", -1,
                       bson_build (BSON_TYPE_STRING, "v", "hello world", -1,
                                   BSON_TYPE_NONE),
                       BSON_TYPE_INT32, "int32", FALSE, 32,
                       BSON_TYPE_INT64, "int64", FALSE, (gint64)-42,
                       BSON_TYPE_NONE);
  bson_finish (b);

  o = test_bson_generate_full ();

  cmp_ok (bson_size (b), "==", bson_size (o),
          "bson_build_full() and hand crafted BSON object sizes match");
  ok (memcmp (bson_data (b), bson_data (o), bson_size (b)) == 0,
      "bson_build_full() and hand crafted BSON objects match");

  bson_free (b);
  bson_free (o);

  b = bson_build_full (BSON_TYPE_UNDEFINED, "undef", FALSE,
                       BSON_TYPE_NONE);
  ok (b == NULL,
      "bson_build_full() should fail with an unsupported element type");

  b = bson_build_full (BSON_TYPE_STRING, "str", FALSE, "hello", -1,
                       BSON_TYPE_UNDEFINED, "undef", FALSE,
                       BSON_TYPE_NONE);
  ok (b == NULL,
      "bson_build_full() should fail with an unsupported element type");
}
void
tut_sync_insert (void)
{
  mongo_sync_connection *conn;
  bson *doc1, *doc2, *doc3;

  conn = mongo_sync_connect ("localhost", 27017, FALSE);
  if (!conn)
    {
      perror ("mongo_sync_connect()");
      exit (1);
    }

  doc1 = bson_build (BSON_TYPE_STRING, "hello", "world", -1,
                     BSON_TYPE_INT32, "the_final_answer", 42,
                     BSON_TYPE_BOOLEAN, "yes?", FALSE,
                     BSON_TYPE_INT32, "n", 1,
                     BSON_TYPE_NONE);
  bson_finish (doc1);

  if (!mongo_sync_cmd_insert (conn, "tutorial.docs", doc1, NULL))
    {
      perror ("mongo_sync_cmd_insert()");
      exit (1);
    }

  doc2 = bson_build (BSON_TYPE_INT32, "n", 2,
                     BSON_TYPE_BOOLEAN, "yes?", FALSE,
                     BSON_TYPE_STRING, "hello", "dolly", -1,
                     BSON_TYPE_NONE);
  bson_finish (doc2);

  doc3 = bson_build (BSON_TYPE_INT32, "n", 3,
                     BSON_TYPE_STRING, "hello", "nurse", -1,
                     BSON_TYPE_BOOLEAN, "yes?", TRUE,
                     BSON_TYPE_NONE);
  bson_finish (doc3);

  if (!mongo_sync_cmd_insert (conn, "tutorial.docs", doc2, doc3, NULL))
    {
      perror ("mongo_sync_cmd_insert()");
      exit (1);
    }

  bson_free (doc3);
  bson_free (doc2);
  bson_free (doc1);

  mongo_sync_disconnect (conn);
}
void
test_mongo_sync_cmd_custom_net_secondary (void)
{
  mongo_sync_connection *conn;
  bson *cmd;
  mongo_packet *p;

  skip (!config.secondary_host, 1, "Secondary server not configured");

  conn = mongo_sync_connect (config.secondary_host, config.secondary_port,
                             TRUE);

  cmd = bson_build (BSON_TYPE_INT32, "getnonce", 1,
                    BSON_TYPE_NONE);
  bson_finish (cmd);

  p = mongo_sync_cmd_custom (conn, config.db, cmd);
  ok (p != NULL,
      "mongo_sync_cmd_custom() works on the secondary too");
  mongo_wire_packet_free (p);

  bson_free (cmd);
  mongo_sync_disconnect (conn);

  endskip;
}
void
mongo_gridfs_remove (config_t *config, gint argc, gchar *argv[])
{
  mongo_sync_gridfs *gfs;
  bson *query;
  gchar *fn;

  if (argc < 3)
    {
      fprintf (stderr, "Usage: %s remove GRIDFS_FILENAME\n", argv[0]);
      exit (1);
    }
  fn = argv[2];

  gfs = mongo_gridfs_connect (config);

  VLOG ("Deleting file: '%s'...\n", fn);

  query = bson_build (BSON_TYPE_STRING, "filename", fn, -1,
                      BSON_TYPE_NONE);
  bson_finish (query);

  if (mongo_sync_gridfs_remove (gfs, query))
    {
      VLOG ("\tDeleted\n");
    }
  else
    {
      VLOG ("\tFailed: %s\n", strerror (errno));
    }

  bson_free (query);

  mongo_sync_gridfs_free (gfs, TRUE);
}
void
test_mongo_sync_gridfs_chunked_find (void)
{
  mongo_sync_connection *conn;
  mongo_sync_gridfs *gfs;
  bson *query;

  query = bson_build (BSON_TYPE_STRING, "filename", "bogus-fn", -1,
                      BSON_TYPE_NONE);
  bson_finish (query);

  ok (mongo_sync_gridfs_chunked_find (NULL, query) == NULL,
      "mongo_sync_gridfs_chunked_find() fails with a NULL GridFS");

  begin_network_tests (2);

  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);

  ok (mongo_sync_gridfs_chunked_find (gfs, NULL) == NULL,
      "mongo_sync_gridfs_chunked_find() fails with a NULL query");
  ok (mongo_sync_gridfs_chunked_find (gfs, query) == NULL,
      "mongo_sync_gridfs_chunked_find() fails when the file is not found");

  mongo_sync_gridfs_free (gfs, TRUE);

  end_network_tests ();

  bson_free (query);
}
static void
do_inserts (mongo_sync_connection *conn)
{
  bson *base;
  gint i;

  base = bson_build (BSON_TYPE_STRING, "tutorial-program", "tut_hl_client.c", -1,
                     BSON_TYPE_INT32,
                     "the answer to life, the universe and everything", 42,
                     BSON_TYPE_NONE);
  bson_finish (base);

  for (i = 0; i < 1000; i++)
    {
      bson *n;

      /* Copy the base document without its closing byte, so that the
         copy remains open and another field can be appended to it. */
      n = bson_new_from_data (bson_data (base), bson_size (base) - 1);
      bson_append_int32 (n, "counter", i);
      bson_finish (n);

      if (!mongo_sync_cmd_insert (conn, "lmc.tutorial", n, NULL))
        {
          fprintf (stderr, "Error inserting document %d: %s\n", i,
                   strerror (errno));
          exit (1);
        }
      bson_free (n);
    }
  bson_free (base);
}
static void do_query (mongo_sync_connection *conn) { mongo_sync_cursor *c; bson *query; gchar *error = NULL; query = bson_build (BSON_TYPE_TIMESTAMP, "processed", 1294860709000, BSON_TYPE_NONE); bson_finish (query); c = mongo_sync_cursor_new (conn, "blahblah.plurals", mongo_sync_cmd_query (conn, "blahblah.plurals", 0, 0, 10, query, NULL)); if (!c) { fprintf (stderr, "Error creating the query cursor: %s\n", strerror (errno)); exit (1); } bson_free (query); while (mongo_sync_cursor_next (c)) { bson *b = mongo_sync_cursor_get_data (c); bson_cursor *bc; gint32 w_t; gint64 w_i; if (!b) { int e = errno; mongo_sync_cmd_get_last_error (conn, "blahblah.plurals", &error); fprintf (stderr, "Error retrieving cursor data: %s\n", (error) ? error : strerror (e)); exit (1); } bc = bson_find (b, "word_type"); bson_cursor_get_int32 (bc, &w_t); printf ("\rWord type: %d\n", w_t); bc = bson_find (b, "word_id"); bson_cursor_get_int64 (bc, &w_i); printf ("\rWord id: %d\n", (int)w_i); bson_cursor_free (bc); bson_free (b); } printf ("\n"); mongo_sync_cursor_free (c); }
static void
do_inserts (mongo_sync_connection *conn)
{
  bson *data1a, *data1b;

  /* Missing values or NULL values are simply not provided. */
  data1a = bson_build (BSON_TYPE_TIMESTAMP, "processed", (gint64)0,
                       BSON_TYPE_STRING, "encoded_encrypted_data",
                       "SGVsbG8gV29ybGQgCg==", -1,
                       BSON_TYPE_STRING, "user_id", "", -1,
                       BSON_TYPE_INT64, "word_id", (gint64)0,
                       BSON_TYPE_INT64, "synset_id", (gint64)0,
                       BSON_TYPE_INT32, "word_type", 0,
                       BSON_TYPE_STRING, "data", "", -1,
                       BSON_TYPE_NONE);
  bson_finish (data1a);

  if (!mongo_sync_cmd_insert (conn, "blahblah.plurals", data1a, NULL))
    {
      fprintf (stderr, "Error inserting document: %s\n", strerror (errno));
      exit (1);
    }
  bson_free (data1a);

  data1b = bson_build (BSON_TYPE_TIMESTAMP, "processed", 1294860709000,
                       BSON_TYPE_STRING, "encoded_encrypted_data",
                       "SGVsbG8gV29ybGQgCg==", -1,
                       BSON_TYPE_STRING, "user_id", "****", -1,
                       BSON_TYPE_INT64, "word_id", (gint64)41012531,
                       BSON_TYPE_INT64, "synset_id", (gint64)0,
                       BSON_TYPE_INT32, "word_type", 1,
                       BSON_TYPE_STRING, "data", "smörgåsbord", -1,
                       BSON_TYPE_NONE);
  bson_finish (data1b);

  if (!mongo_sync_cmd_insert (conn, "blahblah.plurals", data1b, NULL))
    {
      fprintf (stderr, "Error inserting document: %s\n", strerror (errno));
      exit (1);
    }
  bson_free (data1b);
}
NEOERR* mmg_prepare(mmg_conn *db, int flags, int skip, int limit,
                    NEOERR* (*qcbk)(mmg_conn *db, HDF *node, bool lastone),
                    char *sels, char *querys)
{
    if (!db || !querys) return nerr_raise(NERR_ASSERT, "parameter null");

    mtc_noise("prepare %s %s", querys, sels);

    /*
     * doc query
     */
    if (db->docq) {
        bson_free(db->docq);
        db->docq = NULL;
    }
    db->docq = mbson_new_from_string(querys, true);
    if (!db->docq)
        return nerr_raise(NERR_ASSERT, "build query: %s: %s",
                          querys, strerror(errno));

    /*
     * doc selector
     */
    if (db->docs) {
        bson_free(db->docs);
        db->docs = NULL;
    }
    if (sels) {
        db->docs = mbson_new_from_string(sels, false);
        if (!db->docs)
            return nerr_raise(NERR_ASSERT, "build selector: %s: %s",
                              sels, strerror(errno));
        if (!(flags & MMG_FLAG_GETOID))
            bson_append_int32(db->docs, "_id", 0);
        bson_finish(db->docs);
    } else if (!(flags & MMG_FLAG_GETOID)) {
        db->docs = bson_build(BSON_TYPE_INT32, "_id", 0, BSON_TYPE_NONE);
        bson_finish(db->docs);
    }

    /*
     * a later mmg_prepare() called from inside a callback must not
     * overwrite the original callback
     */
    if (db->incallback && qcbk != NULL)
        return nerr_raise(NERR_ASSERT, "already in callback, can't set callback");
    if (!db->incallback)
        db->query_callback = qcbk;

    db->flags = flags;
    db->skip = skip;
    db->limit = limit;
    db->rescount = 0;

    return STATUS_OK;
}
static void do_query (mongo_sync_connection *conn) { mongo_sync_cursor *c; bson *query; gchar *error = NULL; query = bson_build (BSON_TYPE_STRING, "tutorial-program", "tut_hl_client.c", -1, BSON_TYPE_NONE); bson_finish (query); c = mongo_sync_cursor_new (conn, "lmc.tutorial", mongo_sync_cmd_query (conn, "lmc.tutorial", 0, 0, 10, query, NULL)); if (!c) { fprintf (stderr, "Error creating the query cursor: %s\n", strerror (errno)); exit (1); } bson_free (query); while (mongo_sync_cursor_next (c)) { bson *b = mongo_sync_cursor_get_data (c); bson_cursor *bc; gint32 cnt; if (!b) { int e = errno; mongo_sync_cmd_get_last_error (conn, "lmc", &error); fprintf (stderr, "Error retrieving cursor data: %s\n", (error) ? error : strerror (e)); exit (1); } bc = bson_find (b, "counter"); bson_cursor_get_int32 (bc, &cnt); printf ("\rCounter: %d", cnt); bson_cursor_free (bc); bson_free (b); } printf ("\n"); mongo_sync_cursor_free (c); }
static void
delete_old (mongo_sync_connection *conn)
{
  bson *b;

  b = bson_build (BSON_TYPE_TIMESTAMP, "processed", 1294860709000,
                  BSON_TYPE_NONE);
  bson_finish (b);

  //TODO How to delete all records?
  if (!mongo_sync_cmd_delete (conn, "blahblah.plurals", 0, b))
    {
      fprintf (stderr, "Error deleting documents: %s\n", strerror (errno));
      exit (1);
    }
  bson_free (b);
}
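/* A possible answer to the TODO above (a sketch, not part of the original
 * source): in MongoDB an empty selector matches every document, so deleting
 * all records should amount to passing an empty, finished BSON document to
 * mongo_sync_cmd_delete(); a flags value of 0 does not limit the delete to a
 * single document. The helper name delete_all() is made up for illustration. */
static void
delete_all (mongo_sync_connection *conn)
{
  bson *sel;

  sel = bson_new ();   /* empty document: matches everything */
  bson_finish (sel);

  if (!mongo_sync_cmd_delete (conn, "blahblah.plurals", 0, sel))
    {
      fprintf (stderr, "Error deleting documents: %s\n", strerror (errno));
      exit (1);
    }
  bson_free (sel);
}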
void
test_mongo_sync_gridfs_stream_write (void)
{
  mongo_sync_connection *conn;
  mongo_sync_gridfs *gfs;
  mongo_sync_gridfs_stream *stream;
  bson *meta;
  guint8 buffer[4096];

  mongo_util_oid_init (0);

  meta = bson_build (BSON_TYPE_STRING, "my-id", "sync_gridfs_stream_write", -1,
                     BSON_TYPE_NONE);
  bson_finish (meta);

  ok (mongo_sync_gridfs_stream_write (NULL, buffer, sizeof (buffer)) == FALSE,
      "mongo_sync_gridfs_stream_write() should fail with a NULL connection");

  begin_network_tests (4);

  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);

  stream = mongo_sync_gridfs_stream_new (gfs, meta);

  ok (mongo_sync_gridfs_stream_write (stream, NULL, sizeof (buffer)) == FALSE,
      "mongo_sync_gridfs_stream_write() should fail with a NULL buffer");
  ok (mongo_sync_gridfs_stream_write (stream, buffer, 0) == FALSE,
      "mongo_sync_gridfs_stream_write() should fail with 0 size");

  ok (mongo_sync_gridfs_stream_write (stream, buffer, sizeof (buffer)) == TRUE,
      "mongo_sync_gridfs_stream_write() works");

  stream->file.type = LMC_GRIDFS_FILE_STREAM_READER;
  ok (mongo_sync_gridfs_stream_write (stream, buffer, sizeof (buffer)) == FALSE,
      "mongo_sync_gridfs_stream_write() should fail with a read stream");

  mongo_sync_gridfs_stream_close (stream);
  mongo_sync_gridfs_free (gfs, TRUE);

  end_network_tests ();

  bson_free (meta);
}
void
test_mongo_sync_gridfs_stream_new (void)
{
  mongo_sync_connection *conn;
  mongo_sync_gridfs *gfs;
  mongo_sync_gridfs_stream *stream;
  bson *meta;

  mongo_util_oid_init (0);

  meta = bson_build (BSON_TYPE_STRING, "my-id", "sync_gridfs_stream_new", -1,
                     BSON_TYPE_NONE);
  bson_finish (meta);

  ok (mongo_sync_gridfs_stream_new (NULL, meta) == FALSE,
      "mongo_sync_gridfs_stream_new() should fail with a NULL connection");

  begin_network_tests (2);

  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);

  stream = mongo_sync_gridfs_stream_new (gfs, NULL);
  ok (stream != NULL,
      "mongo_sync_gridfs_stream_new() works with NULL metadata");
  mongo_sync_gridfs_stream_close (stream);

  stream = mongo_sync_gridfs_stream_new (gfs, meta);
  ok (stream != NULL,
      "mongo_sync_gridfs_stream_new() works with metadata");
  mongo_sync_gridfs_stream_close (stream);

  mongo_sync_gridfs_free (gfs, TRUE);

  end_network_tests ();

  bson_free (meta);
}
void
test_mongo_sync_cmd_custom_net (void)
{
  mongo_sync_connection *conn;
  bson *cmd;
  mongo_packet *p;

  begin_network_tests (3);

  conn = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
  mongo_sync_cmd_is_master (conn);
  mongo_sync_conn_set_auto_reconnect (conn, TRUE);

  cmd = bson_build (BSON_TYPE_INT32, "getnonce", 1,
                    BSON_TYPE_NONE);
  bson_finish (cmd);

  p = mongo_sync_cmd_custom (conn, config.db, cmd);
  ok (p != NULL, "mongo_sync_cmd_custom() works");
  mongo_wire_packet_free (p);

  shutdown (conn->super.fd, SHUT_RDWR);
  sleep (3);

  p = mongo_sync_cmd_custom (conn, config.db, cmd);
  ok (p != NULL, "mongo_sync_cmd_custom() automatically reconnects");
  mongo_wire_packet_free (p);

  bson_free (cmd);
  mongo_sync_disconnect (conn);

  test_mongo_sync_cmd_custom_net_secondary ();

  end_network_tests ();
}
void
test_mongo_sync_gridfs_chunked_file_new_from_buffer (void)
{
  mongo_sync_connection *conn;
  mongo_sync_gridfs *gfs;
  bson *metadata;
  guint8 *buffer;
  mongo_sync_gridfs_chunked_file *gfile;

  buffer = g_malloc (BUFFER_SIZE);
  memset (buffer, 'a', BUFFER_SIZE);

  conn = test_make_fake_sync_conn (4, TRUE);
  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);

  metadata = bson_build (BSON_TYPE_STRING, "filename",
                         "gridfs_file_new_from_buffer", -1,
                         BSON_TYPE_NONE);
  bson_finish (metadata);

  ok (mongo_sync_gridfs_chunked_file_new_from_buffer (NULL, metadata,
                                                      buffer, BUFFER_SIZE) == FALSE,
      "mongo_sync_gridfs_chunked_file_new_from_buffer() fails with a NULL GridFS");

  mongo_sync_gridfs_free (gfs, TRUE);

  begin_network_tests (5);

  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);

  ok (mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, metadata,
                                                      NULL, BUFFER_SIZE) == FALSE,
      "mongo_sync_gridfs_chunked_file_new_from_buffer() fails with NULL data");
  ok (mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, metadata,
                                                      buffer, 0) == FALSE,
      "mongo_sync_gridfs_chunked_file_new_from_buffer() fails with an invalid data size");

  ok (mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, metadata,
                                                      buffer, BUFFER_SIZE) == FALSE,
      "mongo_sync_gridfs_chunked_file_new_from_buffer() fails with uninitialized OID");

  mongo_util_oid_init (0);

  gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, metadata,
                                                          buffer, BUFFER_SIZE);
  ok (gfile != NULL,
      "mongo_sync_gridfs_chunked_file_new_from_buffer() works with metadata");
  mongo_sync_gridfs_chunked_file_free (gfile);

  gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, NULL,
                                                          buffer, BUFFER_SIZE);
  ok (gfile != NULL,
      "mongo_sync_gridfs_chunked_file_new_from_buffer() works without metadata");
  mongo_sync_gridfs_chunked_file_free (gfile);

  mongo_sync_gridfs_free (gfs, TRUE);

  end_network_tests ();

  bson_free (metadata);
  g_free (buffer);
}
void
mongo_gridfs_get (config_t *config, gint argc, gchar *argv[])
{
  mongo_sync_gridfs *gfs;
  mongo_sync_gridfs_chunked_file *gfile;
  mongo_sync_cursor *cursor;
  gint64 n = 0;
  bson *query;
  int fd;
  gchar *gfn, *ofn;

  if (argc < 4)
    {
      fprintf (stderr, "Usage: %s get GRIDFS_FILENAME OUTPUT_FILENAME\n",
               argv[0]);
      exit (1);
    }
  gfn = argv[2];
  ofn = argv[3];

  gfs = mongo_gridfs_connect (config);

  VLOG ("Trying to find '%s'...\n", gfn);

  query = bson_build (BSON_TYPE_STRING, "filename", gfn, -1,
                      BSON_TYPE_NONE);
  bson_finish (query);

  gfile = mongo_sync_gridfs_chunked_find (gfs, query);
  if (!gfile)
    mongo_gridfs_error (errno);
  bson_free (query);

  VLOG ("Opening output file '%s'...\n", ofn);
  fd = open (ofn, O_RDWR | O_CREAT | O_TRUNC, 0600);
  if (fd == -1)
    {
      fprintf (stderr, "Error opening output file '%s': %s\n",
               ofn, strerror (errno));
      exit (1);
    }

  VLOG ("Writing '%s' -> '%s' (%" G_GINT64_FORMAT " bytes in %" G_GINT64_FORMAT
        " chunks)\n", gfn, ofn,
        mongo_sync_gridfs_file_get_length (gfile),
        mongo_sync_gridfs_file_get_chunks (gfile));

  cursor = mongo_sync_gridfs_chunked_file_cursor_new (gfile, 0, 0);
  if (!cursor)
    mongo_gridfs_error (errno);

  while (mongo_sync_cursor_next (cursor))
    {
      gint32 size;
      guint8 *data;

      VLOG ("\rWriting chunk %" G_GINT64_FORMAT "...", n++);

      data = mongo_sync_gridfs_chunked_file_cursor_get_chunk (cursor, &size);
      if (!data)
        mongo_gridfs_error (errno);

      if (write (fd, data, size) != size)
        {
          perror ("write()");
          exit (1);
        }
      g_free (data);
    }
  mongo_sync_cursor_free (cursor);
  mongo_sync_gridfs_chunked_file_free (gfile);

  close (fd);
  mongo_sync_gridfs_free (gfs, TRUE);

  VLOG ("\n");
}
void
mongo_gridfs_put (config_t *config, gint argc, gchar *argv[])
{
  mongo_sync_gridfs *gfs;
  mongo_sync_gridfs_chunked_file *gfile;
  bson *meta;
  int fd;
  guint8 *data;
  struct stat st;
  gchar *gfn, *ifn, *oid_s;

  if (argc < 4)
    {
      fprintf (stderr, "Usage: %s put INPUT_FILENAME GRIDFS_FILENAME\n",
               argv[0]);
      exit (1);
    }
  ifn = argv[2];
  gfn = argv[3];

  mongo_util_oid_init (0);

  gfs = mongo_gridfs_connect (config);

  VLOG ("Opening input file: '%s'...\n", ifn);
  fd = open (ifn, O_RDONLY);
  if (fd == -1)
    {
      fprintf (stderr, "Error opening input file: %s\n", strerror (errno));
      exit (1);
    }
  if (fstat (fd, &st) != 0)
    {
      fprintf (stderr, "Error stat'ing the input file: %s\n",
               strerror (errno));
      exit (1);
    }

  data = mmap (NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
  if (data == MAP_FAILED)
    {
      fprintf (stderr, "Error mmapping the input file: %s\n",
               strerror (errno));
      exit (1);
    }

  meta = bson_build (BSON_TYPE_STRING, "filename", gfn, -1,
                     BSON_TYPE_NONE);
  bson_finish (meta);

  VLOG ("Uploading '%s' -> '%s'...\n", ifn, gfn);
  gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, meta,
                                                          data, st.st_size);
  if (!gfile)
    mongo_gridfs_error (errno);
  bson_free (meta);
  munmap (data, st.st_size);
  close (fd);

  oid_s = mongo_util_oid_as_string (mongo_sync_gridfs_file_get_id (gfile));
  printf ("Uploaded file: %s (_id: %s; md5 = %s)\n", gfn, oid_s,
          mongo_sync_gridfs_file_get_md5 (gfile));
  g_free (oid_s);

  mongo_sync_gridfs_chunked_file_free (gfile);
  mongo_sync_gridfs_free (gfs, TRUE);
}
void
tut_sync_query_complex (void)
{
  mongo_sync_connection *conn;
  mongo_packet *p;
  mongo_sync_cursor *cursor;
  bson *query, *select;
  gint i = 0;

  conn = mongo_sync_connect ("localhost", 27017, FALSE);
  if (!conn)
    {
      perror ("mongo_sync_connect()");
      exit (1);
    }

  query = bson_build_full (BSON_TYPE_DOCUMENT, "$query", TRUE,
                           bson_build (BSON_TYPE_BOOLEAN, "yes?", FALSE,
                                       BSON_TYPE_NONE),
                           BSON_TYPE_DOCUMENT, "$orderby", TRUE,
                           bson_build (BSON_TYPE_INT32, "n", 1,
                                       BSON_TYPE_NONE),
                           BSON_TYPE_NONE);
  bson_finish (query);

  select = bson_build (BSON_TYPE_INT32, "hello", 1,
                       BSON_TYPE_INT32, "n", 1,
                       BSON_TYPE_INT32, "yes?", 1,
                       BSON_TYPE_NONE);
  bson_finish (select);

  p = mongo_sync_cmd_query (conn, "tutorial.docs", 0, 0, 10, query, select);
  if (!p)
    {
      perror ("mongo_sync_cmd_query()");
      exit (1);
    }
  bson_free (query);
  bson_free (select);

  cursor = mongo_sync_cursor_new (conn, "tutorial.docs", p);
  if (!cursor)
    {
      perror ("mongo_sync_cursor_new()");
      exit (1);
    }

  while (mongo_sync_cursor_next (cursor))
    {
      const char *hello;
      gint32 n;
      gboolean yes;
      bson *result;
      bson_cursor *c;

      result = mongo_sync_cursor_get_data (cursor);
      if (!result)
        {
          perror ("mongo_sync_cursor_get_data()");
          exit (1);
        }

      c = bson_find (result, "hello");
      if (!bson_cursor_get_string (c, &hello))
        {
          perror ("bson_cursor_get_string()");
          exit (1);
        }
      bson_cursor_free (c);

      c = bson_find (result, "n");
      if (!bson_cursor_get_int32 (c, &n))
        {
          perror ("bson_cursor_get_int32()");
          exit (1);
        }
      bson_cursor_free (c);

      c = bson_find (result, "yes?");
      if (!bson_cursor_get_boolean (c, &yes))
        {
          perror ("bson_cursor_get_boolean()");
          exit (1);
        }
      bson_cursor_free (c);

      printf ("Document #%d: hello=%s; n=%d; yes?=%s\n",
              i, hello, n, (yes) ? "TRUE" : "FALSE");

      bson_free (result);
      i++;
    }
  mongo_sync_cursor_free (cursor);

  mongo_sync_disconnect (conn);
}