/* Query a replica-set seed node with "ismaster" and add every member host
 * it reports to the connection's host list.  The seed socket is always
 * closed afterwards so a real connection can be made to a discovered
 * member. */
static void mongo_replset_check_seed( mongo *conn ) {
    bson out;
    bson hosts;
    const char *data;
    bson_iterator it;
    bson_iterator it_sub;
    const char *host_string;
    mongo_host_port *host_port = NULL;

    out.data = NULL;
    hosts.data = NULL;

    if( mongo_simple_int_command( conn, "admin", "ismaster", 1, &out ) == MONGO_OK ) {

        if( bson_find( &it, &out, "hosts" ) ) {
            data = bson_iterator_value( &it );
            bson_iterator_from_buffer( &it_sub, data );

            /* Iterate over host list, adding each host to the
             * connection's host list. */
            while( bson_iterator_next( &it_sub ) ) {
                host_string = bson_iterator_string( &it_sub );

                host_port = bson_malloc( sizeof( mongo_host_port ) );

                /* Fix: test the allocation BEFORE using the pointer; the
                 * original passed host_port to mongo_parse_host() first and
                 * only then checked it for NULL. */
                if( host_port ) {
                    mongo_parse_host( host_string, host_port );
                    mongo_replset_add_node( &conn->replset->hosts,
                                            host_port->host, host_port->port );

                    bson_free( host_port );
                    host_port = NULL;
                }
            }
        }
    }

    bson_destroy( &out );
    bson_destroy( &hosts );
    mongo_close_socket( conn->sock );
    conn->sock = 0;
    conn->connected = 0;
}
/* Build and insert the files-collection metadata document for a file whose
 * chunks are already stored.  Asks the server for the md5 of the chunks
 * first ("filemd5" command).  Returns MONGO_OK or an error code. */
static int gridfs_insert_file2( gridfs *gfs, const char *name,
                                const bson_oid_t id, gridfs_offset length,
                                const char *contenttype, gridfile *gfile ) {
    bson command;
    bson ret;
    bson res;
    bson_iterator it;
    int result;
    bson_date_t d;

    /* Ask the server for the md5 checksum of the stored chunks. */
    bson_init( &command );
    bson_append_oid( &command, "filemd5", &id );
    bson_append_string( &command, "root", gfs->prefix );
    bson_finish( &command );
    result = mongo_run_command( gfs->client, gfs->dbname, &command, &res );
    bson_destroy( &command );
    if ( result != MONGO_OK )
        return result;

    /* Create and insert BSON for file metadata. */
    bson_init( &ret );
    bson_append_oid( &ret, "_id", &id );
    if ( name != NULL && *name != '\0' ) {
        bson_append_string( &ret, "filename", name );
    }
    bson_append_long( &ret, "length", length );
    bson_append_int( &ret, "chunkSize", DEFAULT_CHUNK_SIZE );
    d = ( bson_date_t )1000 * time( NULL ); /* milliseconds since the epoch */
    bson_append_date( &ret, "uploadDate", d );

    /* Fix: verify the md5 field is present before reading it; the original
     * dereferenced an unpositioned iterator on a malformed reply. */
    if ( bson_find( &it, &res, "md5" ) ) {
        bson_append_string( &ret, "md5", bson_iterator_string( &it ) );
    }
    bson_destroy( &res );

    if ( contenttype != NULL && *contenttype != '\0' ) {
        bson_append_string( &ret, "contentType", contenttype );
    }
    bson_append_bson( &ret, "metadata", gfile->meta );
    bson_finish( &ret );

    result = mongo_insert( gfs->client, gfs->files_ns, &ret );
    bson_destroy( &ret );
    return result;
}
/* Match a BSON object against a query document.  Every key in the query
 * (dotted paths are split at the first '.') must exist in the object and
 * satisfy QueryElemMatch().  Returns True when all clauses match. */
int QueryMatch( BinaryStr *query, BinaryStr *obj ) {
    bson_iterator q, o;
    bson_type qType, tType;
    bson qBson, oBson;

    bson_iterator_from_buffer( &q, query->data );
    bson_iterator_from_buffer( &o, obj->data );
    bson_init_finished_data( &qBson, ( char * )query->data );
    bson_init_finished_data( &oBson, ( char * )obj->data );

    /* For all keys in the query... */
    while ( ( qType = bson_iterator_next( &q ) ) ) {
        char *newKey;
        const char *key = bson_iterator_key( &q );
        char *keyEnd;
        BSONElem qVal, oVal;

        newKey = strdup( key );
        if ( !newKey ) {
            return False;
        }
        /* Split "a.b" into the leading component and the remainder. */
        keyEnd = strchr( newKey, '.' );
        if ( keyEnd ) {
            *keyEnd = '\0';
            keyEnd++;
        } else {
            keyEnd = newKey + strlen( newKey );
        }
        BSONElemInitFromItr( &qVal, &q );
        tType = bson_find( &o, &oBson, newKey );
        if ( !tType ) {
            /* Fix: the original leaked newKey on this early return. */
            free( newKey );
            return False;
        }
        BSONElemInitFromItr( &oVal, &o );
        if ( !QueryElemMatch( keyEnd, &oVal, &qVal ) ) {
            free( newKey );
            return False;
        }
        free( newKey );
    }
    return True;
}
/* Run "ismaster" against the admin database.  Returns the reported flag
 * and caches it on the connection.  If 'realout' is non-NULL the full
 * reply is transferred to it (caller owns it); otherwise it is freed. */
bson_bool_t mongo_cmd_ismaster( mongo *conn, bson *realout ) {
    bson out;
    bson_bool_t ismaster = 0;

    /* Fix: make 'out' safe to transfer/destroy even when the command
     * fails; the original left it uninitialized on that path. */
    out.data = NULL;

    if ( mongo_simple_int_command( conn, "admin", "ismaster", 1, &out ) == MONGO_OK ) {
        bson_iterator it;
        /* Fix: check the field exists before reading the iterator. */
        if ( bson_find( &it, &out, "ismaster" ) )
            ismaster = bson_iterator_bool( &it );
        conn->ismaster = ismaster;
    }

    if ( realout )
        *realout = out; /* transfer of ownership */
    else
        bson_destroy( &out );

    return ismaster;
}
bool bson_cmp( const bson *a, const bson *b ) { assert( a ); assert( b ); bson_iterator *a_it, *b_it; bool equal; size_t a_keys_count, b_keys_count; equal = true; a_it = malloc( sizeof( bson_iterator ) ); b_it = malloc( sizeof( bson_iterator ) ); bson_iterator_init( a_it, a ); bson_iterator_init( b_it, b ); a_keys_count = bson_keypars_count( a ); b_keys_count = bson_keypars_count( b ); if( a_keys_count == b_keys_count ) { while( bson_iterator_next( a_it ) ) { bson_type a_key_type = bson_iterator_type( a_it ); const char *a_key = bson_iterator_key( a_it ); bson_type b_key_type = bson_find( b_it, b, a_key ); if( b_key_type == BSON_EOO || a_key_type != b_key_type || !bson_values_are_equal( a_key_type, a_it, b_it ) ) { equal = false; break; } } } else { equal = false; } free( a_it ); free( b_it ); return equal; }
/* Return a cursor over 'size' chunks of the file, starting at chunk number
 * 'start' and ordered by chunk number. */
MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, int size ) {
    bson_iterator meta_it;
    bson_oid_t file_id;
    bson range;
    bson selector;
    bson sort;
    bson wrapper;
    mongo_cursor *cursor;

    /* Chunks reference their parent file through its _id. */
    bson_find( &meta_it, gfile->meta, "_id" );
    file_id = *bson_iterator_oid( &meta_it );

    bson_init( &selector );
    bson_append_oid( &selector, "files_id", &file_id );
    if ( size == 1 ) {
        /* A single chunk can be addressed directly by its number. */
        bson_append_int( &selector, "n", start );
    } else {
        /* Otherwise match every chunk with n >= start. */
        bson_init( &range );
        bson_append_int( &range, "$gte", start );
        bson_finish( &range );
        bson_append_bson( &selector, "n", &range );
        bson_destroy( &range );
    }
    bson_finish( &selector );

    bson_init( &sort );
    bson_append_int( &sort, "n", 1 );
    bson_finish( &sort );

    /* Wrap selector and sort order in a {query, orderby} envelope. */
    bson_init( &wrapper );
    bson_append_bson( &wrapper, "query", &selector );
    bson_append_bson( &wrapper, "orderby", &sort );
    bson_finish( &wrapper );

    cursor = mongo_find( gfile->gfs->client, gfile->gfs->chunks_ns,
                         &wrapper, NULL, size, 0, 0 );

    bson_destroy( &wrapper );
    bson_destroy( &selector );
    bson_destroy( &sort );
    return cursor;
}
/* Authenticate against database 'db' with the MONGODB-CR handshake:
 * fetch a nonce via "getnonce", digest nonce+user+password-digest, and
 * send the "authenticate" command.  Returns the mongo_run_command result
 * (MONGO_OK on success, MONGO_ERROR on failure). */
MONGO_EXPORT bson_bool_t mongo_cmd_authenticate( mongo *conn, const char *db, const char *user, const char *pass ) {
    bson from_db;
    bson cmd;
    bson out;
    const char *nonce;
    int result;

    mongo_md5_state_t st;
    mongo_md5_byte_t digest[16];
    char hex_digest[33];

    out.data = NULL;

    if( mongo_simple_int_command( conn, db, "getnonce", 1, &from_db ) == MONGO_OK ) {
        bson_iterator it;
        /* Fix: a reply without a nonce field previously led to reading an
         * unpositioned iterator. */
        if( !bson_find( &it, &from_db, "nonce" ) ) {
            bson_destroy( &from_db );
            return MONGO_ERROR;
        }
        nonce = bson_iterator_string( &it );
    }
    else {
        return MONGO_ERROR;
    }

    mongo_pass_digest( user, pass, hex_digest );

    mongo_md5_init( &st );
    mongo_md5_append( &st, ( const mongo_md5_byte_t * )nonce, strlen( nonce ) );
    mongo_md5_append( &st, ( const mongo_md5_byte_t * )user, strlen( user ) );
    mongo_md5_append( &st, ( const mongo_md5_byte_t * )hex_digest, 32 );
    mongo_md5_finish( &st, digest );
    digest2hex( digest, hex_digest );

    bson_init( &cmd );
    bson_append_int( &cmd, "authenticate", 1 );
    bson_append_string( &cmd, "user", user );
    bson_append_string( &cmd, "nonce", nonce );
    bson_append_string( &cmd, "key", hex_digest );
    bson_finish( &cmd );

    /* 'nonce' points into from_db's buffer, so destroy it only after the
     * command document has been built. */
    bson_destroy( &from_db );

    result = mongo_run_command( conn, db, &cmd, &out );

    /* Fix: the original destroyed from_db a second time here (double
     * destroy) and never destroyed the command reply 'out'. */
    bson_destroy( &out );
    bson_destroy( &cmd );

    return result;
}
/* Ping database 'db' and report whether the server answered with ok=true. */
static bson_bool_t mongodb_cmd_ping(mongo_connection *conn, const char *db)
{
    bson reply;
    bson_iterator it;
    bson_bool_t alive;

    memset(&reply, 0, sizeof(bson));

    alive = mongo_simple_int_command(conn, db, "ping", 1, &reply);
    if (alive) {
        /* The command succeeded; the verdict is the "ok" field's value. */
        alive = (bson_find(&it, &reply, "ok") == bson_eoo)
                    ? 0
                    : bson_iterator_bool(&it);
    }

    bson_destroy(&reply);
    return alive;
}
/* Stream every chunk's binary payload to 'stream', in chunk order, and
 * return the file's content length. */
gridfs_offset gridfile_write_file( gridfile *gfile, FILE *stream ) {
    bson chunk;
    bson_iterator it;
    const char *payload;
    size_t payload_len;
    int idx;
    const int chunk_count = gridfile_get_numchunks( gfile );

    for ( idx = 0; idx < chunk_count; idx++ ) {
        chunk = gridfile_get_chunk( gfile, idx );
        bson_find( &it, &chunk, "data" );
        payload_len = bson_iterator_bin_len( &it );
        payload = bson_iterator_bin_data( &it );
        fwrite( payload, sizeof( char ), payload_len, stream );
        bson_destroy( &chunk );
    }

    return gridfile_get_contentlength( gfile );
}
/* Look up 'key' in collection 'db' and return a heap-allocated copy of the
 * string stored under field 'which', or NULL when the key is absent or any
 * argument is missing.  Caller frees the result. */
char *mongodb_retrieve_key(bot_t * bot, char *db, char *key, char *which)
{
    char *result = NULL;
    bson query;
    mongo_cursor cursor;

    if (!db || !key || !which)
        return NULL;

    debug(bot, "mongodb_retrieve_key: Entered :db=%s. key=%s, which=%s\n",
          db, key, which);

    bson_init(&query);
    bson_append_string(&query, "key", key);
    bson_finish(&query);

    mongo_cursor_init(&cursor, &gi->mongo_conn, db);
    mongo_cursor_set_query(&cursor, &query);

    if (mongo_cursor_next(&cursor) == MONGO_OK) {
        bson_iterator it;
        debug(bot, "mongodb_retrieve_key: Found!\n");
        if (bson_find(&it, mongo_cursor_bson(&cursor), which)) {
            /* Copy the string out of the cursor-owned BSON buffer. */
            result = strdup((char *)bson_iterator_string(&it));
        }
        bson_destroy(&query);
        mongo_cursor_destroy(&cursor);
        return result;
    }

    debug(bot, "mongodb_retrieve_key: Key not found\n");
    bson_destroy(&query);
    mongo_cursor_destroy(&cursor);
    return NULL;
}
/* Fetch chunk number 'n' of the given file from the chunks collection.
 * Returns the chunk document; ownership passes to the caller. */
bson gridfile_get_chunk( gridfile *gfile, int n ) {
    bson query;
    bson out;
    bson_iterator it;
    bson_oid_t id;
    int result;

    bson_init( &query );
    bson_find( &it, gfile->meta, "_id" );
    id = *bson_iterator_oid( &it );
    bson_append_oid( &query, "files_id", &id );
    bson_append_int( &query, "n", n );
    bson_finish( &query );

    /* Fix: run the query OUTSIDE assert(); the original wrapped the
     * mongo_find_one() call in assert(), so it was compiled out entirely
     * under NDEBUG. */
    result = mongo_find_one( gfile->gfs->client, gfile->gfs->chunks_ns,
                             &query, NULL, &out );
    assert( result == MONGO_OK );
    ( void )result; /* silence unused warning when NDEBUG is defined */

    bson_destroy( &query );
    return out;
}
/* Legacy bson_buffer variant: build {query:{files_id:<id>, n:...},
 * orderby:{n:1}} and return a cursor over up to 'size' chunks starting at
 * chunk number 'start'. */
mongo_cursor* gridfile_get_chunks(gridfile* gfile, int start, int size)
{
    bson_iterator meta_it;
    bson_oid_t file_id;
    bson_buffer range_buf;
    bson range;
    bson_buffer selector_buf;
    bson selector;
    bson_buffer sort_buf;
    bson sort;
    bson_buffer wrapper_buf;
    bson wrapper;

    /* Chunks reference their parent file through its _id. */
    bson_find(&meta_it, gfile->meta, "_id");
    file_id = *bson_iterator_oid(&meta_it);

    bson_buffer_init(&selector_buf);
    bson_append_oid(&selector_buf, "files_id", &file_id);
    if (size == 1) {
        bson_append_int(&selector_buf, "n", start);
    } else {
        /* Match every chunk with n >= start. */
        bson_buffer_init(&range_buf);
        bson_append_int(&range_buf, "$gte", start);
        bson_from_buffer(&range, &range_buf);
        bson_append_bson(&selector_buf, "n", &range);
    }
    bson_from_buffer(&selector, &selector_buf);

    bson_buffer_init(&sort_buf);
    bson_append_int(&sort_buf, "n", 1);
    bson_from_buffer(&sort, &sort_buf);

    bson_buffer_init(&wrapper_buf);
    bson_append_bson(&wrapper_buf, "query", &selector);
    bson_append_bson(&wrapper_buf, "orderby", &sort);
    bson_from_buffer(&wrapper, &wrapper_buf);

    return mongo_find(gfile->gfs->client, gfile->gfs->chunks_ns,
                      &wrapper, NULL, size, 0, 0);
}
/* Legacy bson_buffer variant: fetch chunk number 'n' of the given file.
 * Returns the chunk document; ownership passes to the caller. */
bson gridfile_get_chunk(gridfile* gfile, int n)
{
    bson query;
    bson out;
    bson_buffer buf;
    bson_iterator it;
    bson_oid_t id;
    bson_bool_t found;

    bson_buffer_init(&buf);
    bson_find(&it, gfile->meta, "_id");
    id = *bson_iterator_oid(&it);
    bson_append_oid(&buf, "files_id", &id);
    bson_append_int(&buf, "n", n);
    bson_from_buffer(&query, &buf);

    /* Fix: run the query OUTSIDE assert(); the original put mongo_find_one()
     * inside assert(), so the call vanished under NDEBUG. */
    found = mongo_find_one(gfile->gfs->client, gfile->gfs->chunks_ns,
                           &query, NULL, &out);
    assert(found);
    (void)found; /* silence unused warning when NDEBUG is defined */

    /* Fix: the original leaked the query document. */
    bson_destroy(&query);
    return out;
}
/* Copy every chunk's binary payload into 'buf' (which must be large enough
 * to hold the whole file) and return the file's content length. */
gridfs_offset gridfile_write_buffer(gridfile* gfile, char * buf)
{
    bson chunk;
    bson_iterator it;
    const char *payload;
    gridfs_offset payload_len;
    int n;
    const int chunk_count = gridfile_get_numchunks( gfile );
    char *dest = buf;

    for ( n = 0; n < chunk_count; n++ ) {
        chunk = gridfile_get_chunk( gfile, n );
        bson_find( &it, &chunk, "data" );
        payload_len = bson_iterator_bin_len( &it );
        payload = bson_iterator_bin_data( &it );
        memcpy( dest, payload, payload_len );
        dest += payload_len;
    }

    return gridfile_get_contentlength( gfile );
}
/* Print the name of every person aged 24 in tutorial.persons. */
static void tutorial_simple_query( mongo *conn ) {
    bson query;
    mongo_cursor cursor;

    bson_init( &query );
    bson_append_int( &query, "age", 24 );
    bson_finish( &query );

    mongo_cursor_init( &cursor, conn, "tutorial.persons" );
    mongo_cursor_set_query( &cursor, &query );

    while( mongo_cursor_next( &cursor ) == MONGO_OK ) {
        bson_iterator name_it;
        if ( bson_find( &name_it, mongo_cursor_bson( &cursor ), "name" ) ) {
            printf( "name: %s\n", bson_iterator_string( &name_it ) );
        }
    }

    bson_destroy( &query );
    mongo_cursor_destroy( &cursor );
}
/* Legacy bson_buffer variant: ask the server for the md5 of the stored
 * chunks, then build and insert the files-collection metadata document.
 * Returns the inserted document; ownership passes to the caller. */
static bson gridfs_insert_file( gridfs* gfs, const char* name,
                                const bson_oid_t id, gridfs_offset length,
                                const char* contenttype)
{
    bson command;
    bson res;
    bson ret;
    bson_buffer buf;
    bson_iterator it;
    bson_bool_t success;

    /* Check run md5 */
    bson_buffer_init(&buf);
    bson_append_oid(&buf, "filemd5", &id);
    bson_append_string(&buf, "root", gfs->prefix);
    bson_from_buffer(&command, &buf);
    /* Fix: run the command OUTSIDE assert(); the original executed
     * mongo_run_command() inside assert(), so it disappeared under NDEBUG. */
    success = mongo_run_command(gfs->client, gfs->dbname, &command, &res);
    assert(success);
    (void)success; /* silence unused warning when NDEBUG is defined */
    bson_destroy(&command);

    /* Create and insert BSON for file metadata */
    bson_buffer_init(&buf);
    bson_append_oid(&buf, "_id", &id);
    if (name != NULL && *name != '\0') {
        bson_append_string(&buf, "filename", name);
    }
    bson_append_int(&buf, "length", length);
    bson_append_int(&buf, "chunkSize", DEFAULT_CHUNK_SIZE);
    bson_append_date(&buf, "uploadDate", (bson_date_t)1000*time(NULL));
    bson_find(&it, &res, "md5");
    bson_append_string(&buf, "md5", bson_iterator_string(&it));
    bson_destroy(&res);
    if (contenttype != NULL && *contenttype != '\0') {
        bson_append_string(&buf, "contentType", contenttype);
    }
    bson_from_buffer(&ret, &buf);
    mongo_insert(gfs->client, gfs->files_ns, &ret);
    return ret;
}
/* Look up 'key' in collection 'db' and return the integer stored under its
 * "value" field.  Returns -1 when the key is absent or an argument is
 * missing; returns 0 when the document exists but has no "value" field. */
int mongodb_retrieve_key_stat(bot_t * bot, char *db, char *key)
{
    bson query;
    mongo_cursor cursor;
    int value = 0;

    if (!db || !key)
        return -1;

    debug(bot, "mongodb_retrieve_key: Entered\n");

    bson_init(&query);
    bson_append_string(&query, "key", key);
    bson_finish(&query);

    mongo_cursor_init(&cursor, &gi->mongo_conn, db);
    mongo_cursor_set_query(&cursor, &query);

    if (mongo_cursor_next(&cursor) == MONGO_OK) {
        bson_iterator it;
        debug(bot, "mongodb_retrieve_key: Found!\n");
        if (bson_find(&it, mongo_cursor_bson(&cursor), "value")) {
            value = (int)bson_iterator_int(&it);
        }
        bson_destroy(&query);
        mongo_cursor_destroy(&cursor);
        return value;
    }

    debug(bot, "mongodb_retrieve_key: Key not found\n");
    bson_destroy(&query);
    mongo_cursor_destroy(&cursor);
    return -1;
}
/* Remove every file named 'filename' from the GridFS, along with all of
 * its chunks. */
void gridfs_remove_filename(gridfs* gfs, const char* filename )
{
    bson query;
    bson_buffer buf;
    mongo_cursor* files;
    bson file;
    bson_iterator it;
    bson_oid_t id;
    bson b;

    bson_buffer_init(&buf);
    bson_append_string(&buf, "filename", filename);
    bson_from_buffer(&query, &buf);
    files = mongo_find(gfs->client, gfs->files_ns, &query, NULL, 0, 0, 0);
    bson_destroy(&query);

    /* Remove each file and its chunks from files named filename */
    while (mongo_cursor_next(files)) {
        file = files->current;
        bson_find(&it, &file, "_id");
        id = *bson_iterator_oid(&it);

        /* Remove the file with the specified id */
        bson_buffer_init(&buf);
        bson_append_oid(&buf, "_id", &id);
        bson_from_buffer(&b, &buf);
        mongo_remove( gfs->client, gfs->files_ns, &b);
        bson_destroy(&b);

        /* Remove all chunks from the file with the specified id */
        bson_buffer_init(&buf);
        bson_append_oid(&buf, "files_id", &id);
        bson_from_buffer(&b, &buf);
        mongo_remove( gfs->client, gfs->chunks_ns, &b);
        bson_destroy(&b);
    }

    /* Fix: the original leaked the cursor returned by mongo_find(). */
    mongo_cursor_destroy(files);
}
/* Issue "ismaster" and translate the reply: MONGO_OK when this connection
 * points at a primary, MONGO_ERROR (with conn->err set) otherwise. */
static int mongo_check_is_master( mongo *conn ) {
    bson reply;
    bson_iterator it;
    bson_bool_t is_primary = 0;

    reply.data = NULL;

    if ( mongo_simple_int_command( conn, "admin", "ismaster", 1, &reply ) != MONGO_OK )
        return MONGO_ERROR;

    if ( bson_find( &it, &reply, "ismaster" ) )
        is_primary = bson_iterator_bool( &it );

    bson_destroy( &reply );

    if ( is_primary )
        return MONGO_OK;

    conn->err = MONGO_CONN_NOT_MASTER;
    return MONGO_ERROR;
}
void test_func_mongo_sync_oidtest (void) { mongo_sync_connection *conn; bson *boid, *reply = NULL; bson_cursor *c; mongo_packet *p; guint8 *oid; const guint8 *noid; mongo_util_oid_init (0); oid = mongo_util_oid_new (1); boid = bson_new (); bson_append_oid (boid, "driverOIDTest", oid); bson_finish (boid); conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE); p = mongo_sync_cmd_custom (conn, config.db, boid); ok (p != NULL, "driverOIDTest(OID) custom command works"); mongo_wire_reply_packet_get_nth_document (p, 1, &reply); bson_finish (reply); c = bson_find (reply, "oid"); bson_cursor_get_oid (c, &noid); ok (memcmp (oid, noid, 12) == 0, "driverOIDTest(OID) returns the same OID"); bson_cursor_free (c); mongo_sync_disconnect (conn); mongo_wire_packet_free (p); bson_free (boid); bson_free (reply); }
/* Fetch chunk number 'n' of the file into 'out'.  On failure 'out' is set
 * to an empty BSON document rather than being left uninitialized. */
MONGO_EXPORT void gridfile_get_chunk( gridfile *gfile, int n, bson* out ) {
    bson selector;
    bson_iterator meta_it;
    bson_oid_t file_id;
    int found;

    bson_init( &selector );
    bson_find( &meta_it, gfile->meta, "_id" );
    file_id = *bson_iterator_oid( &meta_it );
    bson_append_oid( &selector, "files_id", &file_id );
    bson_append_int( &selector, "n", n );
    bson_finish( &selector );

    found = ( mongo_find_one( gfile->gfs->client, gfile->gfs->chunks_ns,
                              &selector, NULL, out ) == MONGO_OK );
    bson_destroy( &selector );

    if ( !found ) {
        bson empty;
        bson_empty( &empty );
        bson_copy( out, &empty );
    }
}
/* Check that every key of 'obj' exists in 'query' with a matching value.
 * Embedded documents are compared recursively; note that the recursion's
 * verdict is returned immediately, ending the scan (original behavior,
 * preserved). */
uint8_t QueryObjMatch(bson* obj, bson* query)
{
    bson_iterator objItr, queryItr;
    bson_type objType, queryType;

    bson_iterator_init(&objItr, obj);
    bson_iterator_init(&queryItr, query);

    while ((objType = bson_iterator_next(&objItr))) {
        BSONElem objVal, queryVal;
        int cmpResult;

        queryType = bson_find(&queryItr, query, bson_iterator_key(&objItr));
        if (!queryType) {
            /* Key missing from the query document. */
            return False;
        }
        /* An embedded document can only be matched against another one. */
        if ((objType == BSON_OBJECT) != (queryType == BSON_OBJECT)) {
            return False;
        }
        if (objType == BSON_OBJECT && queryType == BSON_OBJECT) {
            bson subObj, subQuery;
            bson_iterator_subobject(&objItr, &subObj);
            bson_iterator_subobject(&queryItr, &subQuery);
            return QueryObjMatch(&subObj, &subQuery);
        }
        BSONElemInitFromItr(&objVal, &objItr);
        BSONElemInitFromItr(&queryVal, &queryItr);
        if (CompareElem(&objVal, &queryVal, &cmpResult)) {
            /* Comparison itself failed (e.g. incompatible types). */
            return False;
        }
        if (cmpResult) {
            /* Non-zero ordering result means the values differ. */
            return False;
        }
    }
    return True;
}
/* Run the "count" command for namespace 'ns' in database 'db', optionally
 * restricted by 'query'.  Returns the count (or -1 if the reply lacks an
 * "n" field), or MONGO_ERROR when the command fails. */
MONGO_EXPORT double mongo_count( mongo *conn, const char *db, const char *ns, const bson *query ) {
    bson cmd;
    bson out = {NULL, 0};
    double count = -1;
    int ran_ok;

    bson_init( &cmd );
    bson_append_string( &cmd, "count", ns );
    if ( query && bson_size( query ) > 5 ) /* not empty */
        bson_append_bson( &cmd, "query", query );
    bson_finish( &cmd );

    ran_ok = ( mongo_run_command( conn, db, &cmd, &out ) == MONGO_OK );
    if ( ran_ok ) {
        bson_iterator it;
        if ( bson_find( &it, &out, "n" ) )
            count = bson_iterator_double( &it );
    }

    bson_destroy( &cmd );
    bson_destroy( &out );
    return ran_ok ? count : MONGO_ERROR;
}
/* Export collections, dispatching on which names were supplied:
 *   - no collection-space name (with or without a collection name):
 *     enumerate every collection space and recurse into each one;
 *   - collection-space name only: enumerate its collections and recurse;
 *   - both names: export that single collection via _exportCL().
 * 'total' accumulates the number of exported records across recursion.
 * Returns SDB_OK on success or an SDB_* error code; cleanup of 'obj'
 * happens at 'done' for both success and error paths. */
INT32 migExport::_run( const CHAR *pCSName, const CHAR *pCLName, INT32 &total )
{
   INT32 rc = SDB_OK ;
   const CHAR *pTemp = NULL ;
   bson obj ;
   bson_iterator it ;
   bson_type type ;

   bson_init( &obj ) ;
   if ( ( pCSName == NULL && pCLName == NULL ) ||
        ( pCSName == NULL && pCLName != NULL ) )
   {
      // No collection space given: walk the full collection-space list.
      rc = _getCSList() ;
      if ( rc )
      {
         PD_LOG ( PDERROR, "Failed to get collection space list, rc = %d", rc ) ;
         goto error ;
      }
      while( TRUE )
      {
         rc = sdbNext( _gCSList, &obj ) ;
         if ( rc )
         {
            if ( SDB_DMS_EOC != rc )
            {
               PD_LOG ( PDERROR, "Failed to get collection space list, rc = %d", rc ) ;
               goto error ;
            }
            else
            {
               // End of cursor: normal termination of the enumeration.
               rc = SDB_OK ;
               goto done ;
            }
         }
         // Each list entry must carry its name as a string field "Name".
         type = bson_find( &it, &obj, "Name" ) ;
         if ( type != BSON_STRING )
         {
            rc = SDB_SYS ;
            PD_LOG ( PDERROR, "List collection space does not string, rc = %d", rc ) ;
            goto error ;
         }
         pTemp = bson_iterator_string( &it ) ;
         // Recurse with the discovered collection-space name.
         rc = _run( pTemp, pCLName, total ) ;
         if ( rc )
         {
            PD_LOG ( PDERROR, "Faild to call _run, rc = %d", rc ) ;
            goto error ;
         }
      }
   }
   else if ( pCSName != NULL && pCLName == NULL )
   {
      // Collection space given but no collection: walk its collection list.
      rc = _getCLList() ;
      if ( rc )
      {
         PD_LOG ( PDERROR, "Failed to get collection list, rc = %d", rc ) ;
         goto error ;
      }
      while ( TRUE )
      {
         rc = sdbNext( _gCLList, &obj ) ;
         if ( rc )
         {
            if ( SDB_DMS_EOC != rc )
            {
               PD_LOG ( PDERROR, "Failed to get collection list, rc = %d", rc ) ;
               goto error ;
            }
            else
            {
               // End of cursor: normal termination of the enumeration.
               rc = SDB_OK ;
               goto done ;
            }
         }
         type = bson_find( &it, &obj, "Name" ) ;
         if ( type != BSON_STRING )
         {
            rc = SDB_SYS ;
            PD_LOG ( PDERROR, "List collection does not string, rc = %d", rc ) ;
            goto error ;
         }
         pTemp = bson_iterator_string( &it ) ;
         // Recurse with the discovered collection name.
         rc = _run( pCSName, pTemp, total ) ;
         if ( rc )
         {
            PD_LOG ( PDERROR, "Faild to call _run, rc = %d", rc ) ;
            goto error ;
         }
      }
   }
   else
   {
      // Both names supplied: export this one collection.
      rc = _exportCL( pCSName, pCLName, total ) ;
      if ( rc )
      {
         PD_LOG ( PDERROR, "Faild to call _export, rc = %d", rc ) ;
         goto error ;
      }
   }
done:
   bson_destroy ( &obj ) ;
   return rc ;
error:
   goto done ;
}
/* Read metadata field 'name' from the file's descriptor as a boolean. */
bson_bool_t gridfile_get_boolean( gridfile *gfile, const char *name ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, name );
    return bson_iterator_bool( &field );
}
/* Return a raw pointer to the value of metadata field 'name'; the pointer
 * aliases the file's metadata buffer, so it is valid only as long as the
 * gridfile's metadata is. */
const char *gridfile_get_field( gridfile *gfile, const char *name ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, name );
    return bson_iterator_value( &field );
}
/* Return the file's stored md5 checksum string (aliases the metadata
 * buffer). */
const char *gridfile_get_md5( gridfile *gfile ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, "md5" );
    return bson_iterator_string( &field );
}
/* Return the file's upload timestamp from the "uploadDate" metadata field. */
bson_date_t gridfile_get_uploaddate( gridfile *gfile ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, "uploadDate" );
    return bson_iterator_date( &field );
}
/* Return the file's chunk size from the "chunkSize" metadata field. */
int gridfile_get_chunksize( gridfile *gfile ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, "chunkSize" );
    return bson_iterator_int( &field );
}
/* Exercise mongo_wire_cmd_insert_n(): argument validation, packet header
 * fields, and the layout of the documents serialized into the packet
 * body (first document's cursor walk, second document's emptiness). */
void test_mongo_wire_cmd_insert_n (void)
{
  bson *ins, *tmp;
  const bson *docs[10];
  mongo_packet *p;
  mongo_packet_header hdr;
  const guint8 *data;
  gint32 data_size;
  bson_cursor *c;
  gint32 pos;

  ins = test_bson_generate_full ();
  tmp = bson_new ();

  /* docs[4] is deliberately NULL and tmp is deliberately unfinished at
   * first, to drive the failure cases below. */
  docs[0] = ins;
  docs[1] = tmp;
  docs[2] = ins;
  docs[3] = ins;
  docs[4] = NULL;
  docs[5] = ins;

  /* Invalid-argument cases must all return NULL. */
  ok (mongo_wire_cmd_insert_n (1, NULL, 1, docs) == NULL,
      "mongo_wire_cmd_insert_n() fails with a NULL namespace");
  ok (mongo_wire_cmd_insert_n (1, "test.ns", 1, NULL) == NULL,
      "mongo_wire_cmd_insert_n() fails with no documents");
  ok (mongo_wire_cmd_insert_n (1, "test.ns", 0, docs) == NULL,
      "mongo_wire_cmd_insert_n() fails with no documents");
  ok (mongo_wire_cmd_insert_n (1, "test.ns", 2, docs) == NULL,
      "mongo_wire_cmd_insert_n() fails with an unfinished document");
  bson_finish (tmp);
  ok (mongo_wire_cmd_insert_n (1, "test.ns", 5, docs) == NULL,
      "mongo_wire_cmd_insert_n() fails with a NULL document in the array");

  ok ((p = mongo_wire_cmd_insert_n (1, "test.ns", 3, docs)) != NULL,
      "mongo_wire_cmd_insert() works");
  bson_free (ins);
  bson_free (tmp);

  /* Test basic header data */
  mongo_wire_packet_get_header (p, &hdr);
  cmp_ok ((data_size = mongo_wire_packet_get_data (p, &data)), "!=", -1,
          "Packet data size appears fine");
  cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + data_size,
          "Packet header length is correct");
  cmp_ok (hdr.id, "==", 1, "Header ID is ok");
  cmp_ok (hdr.resp_to, "==", 0, "Response ID is ok");

  /*
   * Test the first document
   */

  /* pos = zero + collection_name + NULL */
  pos = sizeof (gint32) + strlen ("test.ns") + 1;
  ok ((ins = bson_new_from_data (data + pos, _DOC_SIZE (data, pos) - 1)) != NULL,
      "First document is included");
  bson_finish (ins);

  ok ((c = bson_find (ins, "int32")) != NULL, "BSON contains 'int32'");
  cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT32,
          "int32 has correct type");
  bson_cursor_next (c);
  cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT64,
          "next element has correct type too");
  ok (bson_cursor_next (c) == FALSE,
      "No more data after the update BSON object");
  bson_cursor_free (c);

  /*
   * Test the second document
   */

  pos += bson_size (ins);
  ok ((tmp = bson_new_from_data (data + pos, _DOC_SIZE (data, pos) - 1)) != NULL,
      "Second document is included");
  bson_finish (tmp);
  cmp_ok (bson_size (tmp), "==", 5, "Second document is empty");

  bson_free (ins);
  bson_free (tmp);
  mongo_wire_packet_free (p);
}