/* Verify that the one-field document {a:1} serializes to exactly 12 bytes. */
int test_bson_size( void ) {
    bson doc[1];

    bson_init( doc );
    bson_append_int( doc, "a", 1 );
    bson_finish( doc );

    /* 4 (length) + 1 (type) + 2 ("a\0") + 4 (int32) + 1 (EOO) = 12 bytes */
    ASSERT( bson_size( doc ) == 12 );

    bson_destroy( doc );
    return 0;
}
/* Create the capped collection test.cursors (1,000,000 bytes) used by the
 * tailable-cursor tests; asserts that the server accepted the command. */
void create_capped_collection( mongo *conn ) {
    bson cmd;

    bson_init( &cmd );
    bson_append_string( &cmd, "create", "cursors" );
    bson_append_bool( &cmd, "capped", 1 );
    bson_append_int( &cmd, "size", 1000000 );
    bson_finish( &cmd );

    ASSERT( mongo_run_command( conn, "test", &cmd, NULL ) == MONGO_OK );

    bson_destroy( &cmd );
}
/*
 * Build and insert the metadata document for a GridFS file into the
 * <prefix>.files collection.  The server is first asked (via the filemd5
 * command) for the md5 of the chunks already stored under `id`, and that
 * digest is recorded in the metadata.  Returns the inserted document by
 * value; ownership passes to the caller, who must bson_destroy it.
 */
static bson gridfs_insert_file( gridfs* gfs, const char* name,
                                const bson_oid_t id, gridfs_offset length,
                                const char* contenttype) {
  bson command;
  bson res;
  bson ret;
  bson_buffer buf;
  bson_iterator it;
  int ok;

  /* Check run md5 */
  bson_buffer_init(&buf);
  bson_append_oid(&buf, "filemd5", &id);
  bson_append_string(&buf, "root", gfs->prefix);
  bson_from_buffer(&command, &buf);
  /* BUGFIX: the command used to run *inside* assert(), so with NDEBUG the
   * whole call was compiled away and `res` stayed uninitialized.  Run it
   * unconditionally and assert on the saved result instead. */
  ok = mongo_run_command(gfs->client, gfs->dbname, &command, &res);
  assert(ok);
  (void)ok; /* silence unused-variable warning under NDEBUG */
  bson_destroy(&command);

  /* Create and insert BSON for file metadata */
  bson_buffer_init(&buf);
  bson_append_oid(&buf, "_id", &id);
  if (name != NULL && *name != '\0') {
    bson_append_string(&buf, "filename", name);
  }
  /* BUGFIX: length is a 64-bit gridfs_offset; bson_append_int silently
   * truncated files larger than 2GB.  Store it as a BSON long instead. */
  bson_append_long(&buf, "length", length);
  bson_append_int(&buf, "chunkSize", DEFAULT_CHUNK_SIZE);
  bson_append_date(&buf, "uploadDate", (bson_date_t)1000*time(NULL));
  bson_find(&it, &res, "md5");
  bson_append_string(&buf, "md5", bson_iterator_string(&it));
  bson_destroy(&res);
  if (contenttype != NULL && *contenttype != '\0') {
    bson_append_string(&buf, "contentType", contenttype);
  }
  bson_from_buffer(&ret, &buf);

  mongo_insert(gfs->client, gfs->files_ns, &ret);

  return ret;
}
/*
 * Authenticate against database `db` using the legacy MONGODB-CR scheme:
 * fetch a nonce with getnonce, compute md5(nonce + user + md5(user:mongo:pass)),
 * and send it back with the authenticate command.
 *
 * Returns MONGO_OK on success and MONGO_ERROR otherwise (note: the declared
 * bson_bool_t return type is kept for interface compatibility, but callers
 * receive the MONGO_OK/MONGO_ERROR codes, as before).
 */
bson_bool_t mongo_cmd_authenticate( mongo *conn, const char *db, const char *user, const char *pass ) {
    bson from_db;
    bson cmd;
    bson out;
    const char *nonce;
    bson_bool_t success = 0;

    mongo_md5_state_t st;
    mongo_md5_byte_t digest[16];
    char hex_digest[33];

    if( mongo_simple_int_command( conn, db, "getnonce", 1, &from_db ) == MONGO_OK ) {
        bson_iterator it;
        bson_find( &it, &from_db, "nonce" );
        nonce = bson_iterator_string( &it );
    }
    else {
        return MONGO_ERROR;
    }

    mongo_pass_digest( user, pass, hex_digest );

    mongo_md5_init( &st );
    mongo_md5_append( &st, ( const mongo_md5_byte_t * )nonce, strlen( nonce ) );
    mongo_md5_append( &st, ( const mongo_md5_byte_t * )user, strlen( user ) );
    mongo_md5_append( &st, ( const mongo_md5_byte_t * )hex_digest, 32 );
    mongo_md5_finish( &st, digest );
    digest2hex( digest, hex_digest );

    bson_init( &cmd );
    bson_append_int( &cmd, "authenticate", 1 );
    bson_append_string( &cmd, "user", user );
    bson_append_string( &cmd, "nonce", nonce );
    bson_append_string( &cmd, "key", hex_digest );
    bson_finish( &cmd );

    /* `nonce` points into from_db's buffer, so from_db may only be destroyed
     * once the command document has been built.
     * BUGFIX: from_db used to be destroyed a second time after
     * mongo_run_command — a double free. */
    bson_destroy( &from_db );

    if( mongo_run_command( conn, db, &cmd, &out ) == MONGO_OK ) {
        bson_iterator it;
        if( bson_find( &it, &out, "ok" ) )
            success = bson_iterator_bool( &it );
        /* BUGFIX: the server reply was leaked. */
        bson_destroy( &out );
    }

    bson_destroy( &cmd );

    if( success )
        return MONGO_OK;
    else
        return MONGO_ERROR;
}
/* Round-trip test: save a record into the "contacts" EJDB collection, load
 * it back by its OID, and verify every field's key, type and value in
 * document order. */
void testSaveLoad() {
    CU_ASSERT_PTR_NOT_NULL_FATAL(jb);
    bson_oid_t oid;
    EJCOLL *ccoll = ejdbcreatecoll(jb, "contacts", NULL);
    CU_ASSERT_PTR_NOT_NULL(ccoll);
    //Save record
    bson a1;
    bson_init(&a1);
    bson_append_string(&a1, "name", "Петров Петр");
    bson_append_string(&a1, "phone", "333-222-333");
    bson_append_int(&a1, "age", 33);
    bson_append_long(&a1, "longage", 0xFFFFFFFFFF01LL);
    bson_append_double(&a1, "doubleage", 0.333333);
    bson_finish(&a1);
    ejdbsavebson(ccoll, &a1, &oid);
    bson_destroy(&a1);

    /* Load the stored document back by the OID assigned on save. */
    bson *lbson = ejdbloadbson(ccoll, &oid);
    CU_ASSERT_PTR_NOT_NULL(lbson);
    bson_iterator it1;
    bson_iterator_init(&it1, lbson);

    /* The first field of a stored record is the generated _id (OID). */
    int btype = bson_iterator_next(&it1);
    CU_ASSERT(btype == BSON_OID);

    btype = bson_iterator_next(&it1);
    CU_ASSERT(btype == BSON_STRING);
    CU_ASSERT(!strcmp("name", bson_iterator_key(&it1)));
    CU_ASSERT(!strcmp("Петров Петр", bson_iterator_string(&it1)));

    btype = bson_iterator_next(&it1);
    CU_ASSERT(btype == BSON_STRING);
    CU_ASSERT(!strcmp("phone", bson_iterator_key(&it1)));
    CU_ASSERT(!strcmp("333-222-333", bson_iterator_string(&it1)));

    btype = bson_iterator_next(&it1);
    CU_ASSERT(btype == BSON_INT);
    CU_ASSERT(!strcmp("age", bson_iterator_key(&it1)));
    CU_ASSERT(33 == bson_iterator_int(&it1));

    btype = bson_iterator_next(&it1);
    CU_ASSERT(btype == BSON_LONG);
    CU_ASSERT(!strcmp("longage", bson_iterator_key(&it1)));
    CU_ASSERT(0xFFFFFFFFFF01LL == bson_iterator_long(&it1));

    btype = bson_iterator_next(&it1);
    CU_ASSERT(btype == BSON_DOUBLE);
    CU_ASSERT(!strcmp("doubleage", bson_iterator_key(&it1)));
    /* Compare with a loose tolerance — only the magnitude is checked. */
    CU_ASSERT_DOUBLE_EQUAL(bson_iterator_double(&it1), 0.3, 0.1);

    /* No further fields expected. */
    btype = bson_iterator_next(&it1);
    CU_ASSERT(btype == BSON_EOO);

    bson_del(lbson);
}
/*
 * Append a single yajl JSON value to bson document `b` under `key`,
 * dispatching on the JSON type: string, number (int vs double detected
 * textually), boolean, array, or object.  Arrays and objects recurse via
 * bson_from_json_array / bson_from_json_object.
 */
void bson_from_json_type(bson *b, yajl_val value, const char* key) {
    if( YAJL_IS_STRING( value ) ) {
        char* string = YAJL_GET_STRING( value );
        bson_append_string( b, key, string );
    }
    else if( YAJL_IS_NUMBER( value ) ) {
        char* string = value->u.number.r;
        size_t len = strlen( string );

        // Hack to detect a double, since 'flags' is always set to double.
        if( memchr( string, '.', len ) || memchr( string, 'e', len ) || memchr( string, 'E', len ) ) {
            double number = YAJL_GET_DOUBLE( value );
            bson_append_double( b, key, number );
        }
        else {
            /* BUGFIX: the value was held in a uint64_t and classified with
             * `number <= INT_MIN && number <= INT_MAX`, which misfiled
             * negative and large values due to unsigned comparison.  Use a
             * signed 64-bit integer and a proper inclusive range check. */
            int64_t number = YAJL_GET_INTEGER( value );
            if( number >= INT_MIN && number <= INT_MAX )
                bson_append_int( b, key, (int)number );
            else
                bson_append_long( b, key, number );
        }
    }
    else if ( YAJL_IS_FALSE( value ) ) {
        bson_append_bool( b, key, 0 );
    }
    else if ( YAJL_IS_TRUE( value ) ) {
        bson_append_bool( b, key, 1 );
    }
    else if ( YAJL_IS_ARRAY( value ) ) {
        bson_append_start_array( b, key );
        bson_from_json_array( b, value );
        bson_append_finish_array( b );
    }
    else if ( YAJL_IS_OBJECT( value ) ) {
        bson_append_start_object( b, key );
        bson_from_json_object( b, value );
        bson_append_finish_object( b );
    }
}
void log_api(uint32_t index, int is_success, uintptr_t return_value, uint64_t hash, last_error_t *lasterr, ...) { va_list args; char idx[4]; va_start(args, lasterr); EnterCriticalSection(&g_mutex); if(g_api_init[index] == 0) { log_explain(index); g_api_init[index] = 1; } LeaveCriticalSection(&g_mutex); bson b; bson_init_size(&b, mem_suggested_size(1024)); bson_append_int(&b, "I", index); bson_append_int(&b, "T", get_current_thread_id()); bson_append_int(&b, "t", get_tick_count() - g_starttick); bson_append_long(&b, "h", hash); // If failure has been determined, then log the last error as well. if(is_success == 0) { bson_append_int(&b, "e", lasterr->lasterror); bson_append_int(&b, "E", lasterr->nt_status); } #if DEBUG if(index != sig_index_exception()) { _log_stacktrace(&b); } #endif bson_append_start_array(&b, "args"); bson_append_int(&b, "0", is_success); bson_append_long(&b, "1", return_value); int argnum = 2, override = 0; for (const char *fmt = sig_paramtypes(index); *fmt != 0; fmt++) { ultostr(argnum++, idx, 10); // Limitation override. Instead of displaying this right away in the // report we turn it into a buffer (much like the dropped files). if(*fmt == '!') { override = 1; argnum--; fmt++; } if(*fmt == 's') { const char *s = va_arg(args, const char *); log_string(&b, idx, s, copy_strlen(s)); } else if(*fmt == 'S') {
/* Bulk-load n documents of the form {a: 0 .. n-1} into test.cursors. */
void insert_sample_data( mongo *conn, int n ) {
    bson doc;
    int i;

    for( i = 0; i < n; i++ ) {
        bson_init( &doc );
        bson_append_int( &doc, "a", i );
        bson_finish( &doc );

        mongo_insert( conn, "test.cursors", &doc, NULL );

        bson_destroy( &doc );
    }
}
// Copy every supported field of obj into this object's BSON buffer
// (`object`), preserving keys.  Unsupported BSON types trigger a debug
// assert and an error log entry but are otherwise skipped.
void InitWith(bson *obj)
{
    bson_iterator iter;
    bson_iterator_init(&iter, obj);

    while (bson_iterator_next(&iter))
    {
        const char *key = bson_iterator_key(&iter);
        bson_type fieldType = bson_iterator_type(&iter);

        switch (fieldType)
        {
        case BSON_STRING:
            bson_append_string(object, key, bson_iterator_string(&iter));
            break;
        case BSON_INT:
            bson_append_int(object, key, bson_iterator_int(&iter));
            break;
        case BSON_LONG:
            bson_append_long(object, key, bson_iterator_long(&iter));
            break;
        case BSON_DOUBLE:
            bson_append_double(object, key, bson_iterator_double(&iter));
            break;
        case BSON_OBJECT:
        {
            // Embedded documents are copied wholesale as a sub-bson.
            bson sub;
            bson_iterator_subobject(&iter, &sub);
            bson_append_bson(object, key, &sub);
            break;
        }
        case BSON_OID:
            bson_append_oid(object, key, bson_iterator_oid(&iter));
            break;
        default:
            DVASSERT(false);
            Logger::Error("[MongodbObjectInternalData::InitWith] Not implemented type: %d", fieldType);
            break;
        }
    }
}
/* Create two indexes on tutorial.persons: a single-key index on "name" and
 * a compound index on ("age", "name"). */
static void tutorial_index( mongo *conn ) {
    bson idx[1];

    /* Single-key index. */
    bson_init( idx );
    bson_append_int( idx, "name", 1 );
    bson_finish( idx );
    mongo_create_index( conn, "tutorial.persons", idx, NULL, 0, NULL );
    bson_destroy( idx );
    printf( "simple index created on \"name\"\n" );

    /* Compound index; key order matters. */
    bson_init( idx );
    bson_append_int( idx, "age", 1 );
    bson_append_int( idx, "name", 1 );
    bson_finish( idx );
    mongo_create_index( conn, "tutorial.persons", idx, NULL, 0, NULL );
    bson_destroy( idx );
    printf( "compound index created on \"age\", \"name\"\n" );
}
/* Regression test for ticket #135: bson2json must render int and double
 * fields with the expected textual formatting. */
void testTicket135(void) {
    bson doc;
    char *json = NULL;
    int jsonlen = 0;

    bson_init(&doc);
    bson_append_int(&doc, "myInt", 10);
    bson_append_double(&doc, "myDouble", -50.0);
    bson_finish(&doc);

    bson2json(bson_data(&doc), &json, &jsonlen);
    CU_ASSERT_PTR_NOT_NULL_FATAL(json);
    CU_ASSERT_PTR_NOT_NULL(strstr(json, "\"myInt\" : 10"));
    CU_ASSERT_PTR_NOT_NULL(strstr(json, "\"myDouble\" : -50.000000"));

    bson_destroy(&doc);
    TCFREE(json);
}
/* Exercise a tailable cursor on the capped collection test.cursors: read all
 * 10000 seeded documents, verify the cursor stays open (MONGO_CURSOR_PENDING)
 * when drained, then confirm it picks up 10 newly inserted documents. */
int test_tailable( mongo *conn ) {
    mongo_cursor *cursor;
    bson b;
    int count;

    remove_sample_data( conn );
    create_capped_collection( conn );
    insert_sample_data( conn, 10000 );

    /* Query wrapper: empty $query plus $sort {$natural:-1} (reverse
     * insertion order). */
    bson_init( &b );
    bson_append_start_object( &b, "$query" );
    bson_append_finish_object( &b );
    bson_append_start_object( &b, "$sort" );
    bson_append_int( &b, "$natural", -1 );
    bson_append_finish_object( &b );
    bson_finish( &b );

    cursor = mongo_find( conn, "test.cursors", &b, bson_shared_empty( ), 0, 0, MONGO_TAILABLE );
    bson_destroy( &b );

    /* Drain the initial contents. */
    count = 0;
    while( mongo_cursor_next( cursor ) == MONGO_OK )
        count++;
    ASSERT( count == 10000 );

    /* A drained tailable cursor reports MONGO_CURSOR_PENDING, not death. */
    ASSERT( mongo_cursor_next( cursor ) == MONGO_ERROR );
    ASSERT( cursor->err == MONGO_CURSOR_PENDING );

    /* New inserts become visible on the same cursor. */
    insert_sample_data( conn, 10 );
    count = 0;
    while( mongo_cursor_next( cursor ) == MONGO_OK ) {
        count++;
    }
    ASSERT( count == 10 );
    ASSERT( mongo_cursor_next( cursor ) == MONGO_ERROR );
    ASSERT( cursor->err == MONGO_CURSOR_PENDING );

    mongo_cursor_destroy( cursor );
    remove_sample_data( conn );
    return 0;
}
/* Allocate and build one GridFS chunk document
 * {files_id: id, n: chunkNumber, data: <binary>}.
 * Returns a heap-allocated bson the caller must free, or NULL on OOM. */
static bson * chunk_new(bson_oid_t id, int chunkNumber, const char * data, int len) {
    bson_buffer buf;
    bson *chunk = (bson *)malloc(sizeof(bson));

    if (chunk == NULL)
        return NULL;

    bson_buffer_init(&buf);
    bson_append_oid(&buf, "files_id", &id);
    bson_append_int(&buf, "n", chunkNumber);
    /* NOTE(review): subtype 2 — presumably the legacy "binary (old)"
     * BSON subtype; confirm against the driver's binary-subtype enum. */
    bson_append_binary(&buf, "data", 2, data, len);
    bson_from_buffer(chunk, &buf);
    return chunk;
}
/* Build and insert the metadata document for a GridFS file into the
 * <prefix>.files collection.  First queries the server (filemd5 command) for
 * the md5 of the chunks already stored under `id`, then inserts
 * {_id, filename?, length, chunkSize, uploadDate, md5, contentType?, metadata}.
 * Returns the mongo_run_command / mongo_insert result (MONGO_OK on success). */
static int gridfs_insert_file2( gridfs *gfs, const char *name, const bson_oid_t id, gridfs_offset length, const char *contenttype, gridfile* gfile ) {
    bson command;
    bson ret;
    bson res;
    bson_iterator it;
    int result;
    int64_t d;

    /* Check run md5 */
    bson_init( &command );
    bson_append_oid( &command, "filemd5", &id );
    bson_append_string( &command, "root", gfs->prefix );
    bson_finish( &command );
    result = mongo_run_command( gfs->client, gfs->dbname, &command, &res );
    bson_destroy( &command );
    if (result != MONGO_OK)
        return result;

    /* Create and insert BSON for file metadata */
    bson_init( &ret );
    bson_append_oid( &ret, "_id", &id );
    if ( name != NULL && *name != '\0' ) {
        bson_append_string( &ret, "filename", name );
    }
    /* length is 64-bit (gridfs_offset), stored as a BSON long. */
    bson_append_long( &ret, "length", length );
    bson_append_int( &ret, "chunkSize", DEFAULT_CHUNK_SIZE );
    /* uploadDate is milliseconds since the epoch. */
    d = ( bson_date_t )1000*time( NULL );
    bson_append_date( &ret, "uploadDate", d);
    /* Copy the server-computed md5 out of the filemd5 reply. */
    bson_find( &it, &res, "md5" );
    bson_append_string( &ret, "md5", bson_iterator_string( &it ) );
    bson_destroy( &res );
    if ( contenttype != NULL && *contenttype != '\0' ) {
        bson_append_string( &ret, "contentType", contenttype );
    }
    bson_append_bson(&ret, "metadata", gfile->meta);
    bson_finish( &ret );

    result = mongo_insert( gfs->client, gfs->files_ns, &ret );
    bson_destroy( &ret );
    return result;
}
int commit_inode(struct inode * e) { bson cond, doc; mongo * conn = get_conn(); char istr[4]; struct dirent * cde = e->dirents; int res; bson_init(&doc); bson_append_start_object(&doc, "$set"); bson_append_start_array(&doc, "dirents"); res = 0; while(cde) { bson_numstr(istr, res++); bson_append_string(&doc, istr, cde->path); cde = cde->next; } bson_append_finish_array(&doc); bson_append_int(&doc, "mode", e->mode); bson_append_long(&doc, "owner", e->owner); bson_append_long(&doc, "group", e->group); bson_append_long(&doc, "size", e->size); bson_append_time_t(&doc, "created", e->created); bson_append_time_t(&doc, "modified", e->modified); if(e->data && e->datalen > 0) bson_append_string_n(&doc, "data", e->data, e->datalen); bson_append_finish_object(&doc); bson_finish(&doc); bson_init(&cond); bson_append_oid(&cond, "_id", &e->oid); bson_finish(&cond); res = mongo_update(conn, inodes_name, &cond, &doc, MONGO_UPDATE_UPSERT, NULL); bson_destroy(&cond); bson_destroy(&doc); if(res != MONGO_OK) { fprintf(stderr, "Error committing inode %s\n", mongo_get_server_err_string(conn)); return -EIO; } return 0; }
// Look up a user record by uid in the configured collection.
// Returns a newly allocated UInfo (caller owns it and must delete it) with
// uid/router/dispatcher/proxy populated from the first matching document,
// or NULL when no document matches.
UInfo* MongoLink::query(int uid)
{
    UInfo* uinfo = NULL;
    bson query[1];
    mongo_cursor cursor[1];

    bson_init( query );
    bson_append_int( query, "uid", uid );
    bson_finish( query );

    mongo_cursor_init( cursor, &m_mongo, m_strDBName.c_str() );
    mongo_cursor_set_query( cursor, query );

    if( mongo_cursor_next( cursor ) == MONGO_OK ) {
        bson_iterator iterator[1];
        uinfo = new UInfo();
        //uinfo->linkid = 0;

        // Each field is optional; only copy those present in the document.
        if ( bson_find( iterator, mongo_cursor_bson( cursor ), "uid" ) != BSON_EOO ) {
            uinfo->uid = bson_iterator_int( iterator );
        }
        if ( bson_find( iterator, mongo_cursor_bson( cursor ), "router" ) != BSON_EOO ) {
            uinfo->router = bson_iterator_string( iterator );
        }
        if ( bson_find( iterator, mongo_cursor_bson( cursor ), "dispatcher" ) != BSON_EOO ) {
            uinfo->dispatcher = bson_iterator_string( iterator );
        }
        if ( bson_find( iterator, mongo_cursor_bson( cursor ), "proxy" ) != BSON_EOO ) {
            uinfo->proxy = bson_iterator_string( iterator );
        }
    } else {
        LOG(TAG_ROUTER, "query uid failed, uid=%d.", uid);
    }

    /* BUGFIX: removed the dead `exit:` label — nothing jumped to it and it
     * only produced an unused-label warning. */
    bson_destroy( query );
    mongo_cursor_destroy( cursor );
    return uinfo;
}
/*
 *----------------------------------------------------------------------
 *
 * mongotcl_tcllist_to_cursor_fields --
 *
 *    Takes a Tcl list that should contain pairs of field names and
 *    0/1 values and a mongotcl cursor clientdata structure.
 *
 *    If successful, sets a bson object in the cursor client data to
 *    contain the equivalent, appropriate bson for passing to
 *    mongo_cursor_set_fields
 *
 *    If unsuccessful, returns TCL_ERROR and sets the bson pointer
 *    to NULL.
 *
 * Results:
 *    A standard Tcl result.
 *
 *
 *----------------------------------------------------------------------
 */
int mongotcl_tcllist_to_cursor_fields (Tcl_Interp *interp, Tcl_Obj *fieldList, mongotcl_cursorClientData *mc)
{
    Tcl_Obj **listObjv;
    int listObjc;
    int i;

    if (Tcl_ListObjGetElements (interp, fieldList, &listObjc, &listObjv) == TCL_ERROR) {
        Tcl_AddErrorInfo (interp, "while reading field list");
        return TCL_ERROR;
    }

    /* Pairs of (field name, 0/1) are required, so the list length must be even. */
    if (listObjc & 1) {
        Tcl_SetObjResult (interp, Tcl_NewStringObj ("field list must have even number of elements", -1));
        return TCL_ERROR;
    }

    /* Lazily allocate the fields bson on first use; it is (re)initialized
     * on every call. */
    if (mc->fieldsBson == NULL) {
        mc->fieldsBson = (bson *)ckalloc(sizeof(bson));
    }
    bson_init(mc->fieldsBson);

    for (i = 0; i < listObjc; i += 2) {
        int want;
        char *key = Tcl_GetString (listObjv[i]);

        if (Tcl_GetIntFromObj (interp, listObjv[i+1], &want) == TCL_ERROR) {
            /* NOTE(review): this label lives inside the if-body and is
             * entered via goto from the bson_append_int/bson_finish failure
             * paths below — legal C, but easy to misread. */
          bson_error:
            return mongotcl_setBsonError (interp, mc->fieldsBson);
        }

        if (bson_append_int (mc->fieldsBson, key, want) != BSON_OK) {
            goto bson_error;
        }
    }

    if (bson_finish (mc->fieldsBson) != BSON_OK) {
        goto bson_error;
    }

    mongo_cursor_set_fields (mc->cursor, mc->fieldsBson);
    return TCL_OK;
}
/*
 * Fetch chunk `n` of gfile from the chunks collection, querying by the
 * file's _id and the chunk index.  Returns the chunk document by value;
 * the caller owns it.
 */
bson gridfile_get_chunk( gridfile *gfile, int n ) {
    bson query;
    bson out;
    bson_iterator it;
    bson_oid_t id;
    int result;

    bson_init( &query );
    bson_find( &it, gfile->meta, "_id" );
    id = *bson_iterator_oid( &it );
    bson_append_oid( &query, "files_id", &id );
    bson_append_int( &query, "n", n );
    bson_finish( &query );

    /* BUGFIX: the lookup used to run *inside* assert(), so with NDEBUG the
     * query never executed and `out` was returned uninitialized.  Run it
     * unconditionally and assert on the saved result. */
    result = mongo_find_one( gfile->gfs->client, gfile->gfs->chunks_ns,
                             &query, NULL, &out );
    assert( result == MONGO_OK );
    (void)result; /* silence unused-variable warning under NDEBUG */

    bson_destroy( &query );
    return out;
}
/* Example: run an aggregation pipeline over test.agg counting distinct
 * elements of the "list" array field. */
int main() {
    /*
     * We assume objects in the form of {_id:<any_id>, list:[{a:<int>,b:<int>}, ...]}
     */
    mongo conn[1];
    mongo_init(conn);
    if(MONGO_OK != mongo_client(conn, "127.0.0.1", 27017))
        return 1;

    bson b[1], b_result[1];

    /*create the aggregation command in bson*/
    /* Pipeline: [{$unwind:"$list"}, {$group:{_id:"$list", distinct_count:{$sum:1}}}] */
    bson_init(b);
    bson_append_string(b, "aggregate", "agg");
    bson_append_start_array(b, "pipeline");
        /* stage 0: flatten the list array into one document per element */
        bson_append_start_object(b,"0");
        bson_append_string(b, "$unwind", "$list");
        bson_append_finish_object(b);
        /* stage 1: group by element value and count occurrences */
        bson_append_start_object(b,"1");
            bson_append_start_object(b,"$group");
            bson_append_string(b,"_id", "$list");
                bson_append_start_object(b, "distinct_count");
                bson_append_int(b, "$sum", 1);
                bson_append_finish_object(b);
            bson_append_finish_object(b);
        bson_append_finish_object(b);
    bson_append_finish_array(b);
    bson_finish(b);

    /*So you can see your command*/
    bson_print(b);

    /*run the command*/
    mongo_run_command(conn, "test", b, b_result);

    /*command results*/
    bson_print(b_result);

    bson_destroy(b_result);
    bson_destroy(b);
    mongo_destroy(conn);
    return 0;
}
/* Update the status field of a delivery report (DLR) record matching the
 * given smsc id, timestamp and (optionally) destination.  Takes a pooled
 * connection; silently returns if the pool is exhausted. */
static void dlr_mongodb_update(const Octstr *smsc, const Octstr *ts, const Octstr *dst, int status)
{
    DBPoolConn *pconn;
    bson cond, op;
    bson_buffer cond_buf, op_buf;
    mongo_connection *conn = NULL;

    pconn = dbpool_conn_consume(pool);
    if (pconn == NULL) {
        return;
    }
    conn = (mongo_connection*)pconn->conn;

    /* Match condition: {smsc_field: smsc, ts_field: ts [, dst_field: dst]} */
    bson_buffer_init(&cond_buf);
    bson_append_string(&cond_buf, octstr_get_cstr(fields->field_smsc), octstr_get_cstr(smsc));
    bson_append_string(&cond_buf, octstr_get_cstr(fields->field_ts), octstr_get_cstr(ts));
    if (dst) {
        bson_append_string(&cond_buf, octstr_get_cstr(fields->field_dst), octstr_get_cstr(dst));
    }
    bson_from_buffer(&cond, &cond_buf);

    /* Update operation: {$set: {status_field: status}} */
    bson_buffer_init(&op_buf);
    {
        bson_buffer *sub = bson_append_start_object(&op_buf, "$set");
        bson_append_int(sub, octstr_get_cstr(fields->field_status), status);
        bson_append_finish_object(sub);
    }
    bson_from_buffer(&op, &op_buf);

    /* MONGO_TRY/MONGO_CATCH is the old driver's setjmp-based exception
     * emulation; the catch arm logs the driver exception type. */
    MONGO_TRY {
        mongo_update(conn, mongodb_namespace, &cond, &op, 0);
    } MONGO_CATCH {
        mongodb_error("dlr_mongodb_update", conn->exception.type);
    }

    dbpool_conn_produce(pconn);
    bson_destroy(&cond);
    bson_destroy(&op);
}
/* Query tutorial.persons for everyone aged 24 and print each name. */
static void tutorial_simple_query( mongo *conn ) {
    bson query[1];
    mongo_cursor cursor[1];

    bson_init( query );
    bson_append_int( query, "age", 24 );
    bson_finish( query );

    mongo_cursor_init( cursor, conn, "tutorial.persons" );
    mongo_cursor_set_query( cursor, query );

    while( mongo_cursor_next( cursor ) == MONGO_OK ) {
        bson_iterator it[1];
        if ( bson_find( it, mongo_cursor_bson( cursor ), "name" ) ) {
            printf( "name: %s\n", bson_iterator_string( it ) );
        }
    }

    bson_destroy( query );
    mongo_cursor_destroy( cursor );
}
/*
 * Fetch chunk `n` of gfile from the chunks collection (old bson_buffer API),
 * querying by the file's _id and the chunk index.  Returns the chunk
 * document by value; the caller owns it.
 */
bson gridfile_get_chunk(gridfile* gfile, int n) {
    bson query;
    bson out;
    bson_buffer buf;
    bson_iterator it;
    bson_oid_t id;
    int found;

    bson_buffer_init(&buf);
    bson_find(&it, gfile->meta, "_id");
    id = *bson_iterator_oid(&it);
    bson_append_oid(&buf, "files_id", &id);
    bson_append_int(&buf, "n", n);
    bson_from_buffer(&query, &buf);

    /* BUGFIX: the lookup used to run *inside* assert(), so with NDEBUG the
     * query never executed and `out` was returned uninitialized. */
    found = mongo_find_one(gfile->gfs->client, gfile->gfs->chunks_ns,
                           &query, NULL, &out);
    assert(found);
    (void)found; /* silence unused-variable warning under NDEBUG */

    /* BUGFIX: the query document was leaked. */
    bson_destroy(&query);

    return out;
}
/* Run the one-field command {cmdstr: arg} against database `db`.  When
 * `realout` is non-NULL the reply document is handed to the caller (who
 * then owns it); otherwise it is destroyed here.  Returns the
 * mongo_run_command result. */
MONGO_EXPORT int mongo_simple_int_command( mongo *conn, const char *db,
    const char *cmdstr, int arg, bson *realout ) {
    bson cmd;
    bson out = {NULL, 0};
    int result;

    bson_init( &cmd );
    bson_append_int( &cmd, cmdstr, arg );
    bson_finish( &cmd );

    result = mongo_run_command( conn, db, &cmd, &out );
    bson_destroy( &cmd );

    if ( realout )
        *realout = out;
    else
        bson_destroy( &out );

    return result;
}
/* Insert the document {key: key, value: value} into namespace `db`.
 * Returns 0 on success, -1 when db/key is NULL or value is negative. */
int mongodb_insert_key_stat(bot_t * bot, char *db, char *key, int value)
{
    bson doc;

    if (!db || !key || value < 0) {
        return -1;
    }

    debug(bot, "mongodb_insert_key: Entered\n");

    bson_init(&doc);
    bson_append_string(&doc, "key", key);
    bson_append_int(&doc, "value", value);
    bson_finish(&doc);

    mongo_insert(&gi->mongo_conn, db, &doc);
    bson_destroy(&doc);

    return 0;
}
/* A query using the invalid operator $bad must fail with
 * MONGO_CURSOR_QUERY_FAIL and server error code 10068. */
int test_bad_query( mongo *conn ) {
    bson query[1];
    mongo_cursor cursor[1];

    bson_init( query );
    bson_append_start_object( query, "foo" );
    bson_append_int( query, "$bad", 1 );
    bson_append_finish_object( query );
    bson_finish( query );

    mongo_cursor_init( cursor, conn, "test.cursors" );
    mongo_cursor_set_query( cursor, query );

    ASSERT( mongo_cursor_next( cursor ) == MONGO_ERROR );
    ASSERT( cursor->err == MONGO_CURSOR_QUERY_FAIL );
    ASSERT( cursor->conn->lasterrcode == 10068 );
    ASSERT( strlen( cursor->conn->lasterrstr ) > 0 );

    mongo_cursor_destroy( cursor );
    bson_destroy( query );
    return 0;
}
int test_error_messages( void ) { mongo conn[1]; bson b[1]; const char *ns = "test.foo"; mongo_init( conn ); bson_init( b ); bson_append_int( b, "foo", 17 ); bson_finish( b ); ASSERT( mongo_insert( conn, ns, b, NULL ) != MONGO_OK ); ASSERT( conn->err == MONGO_IO_ERROR ); ASSERT( conn->errcode == WSAENOTSOCK ); mongo_init( conn ); ASSERT( mongo_count( conn, "test", "foo", NULL ) == MONGO_ERROR ); ASSERT( conn->err == MONGO_IO_ERROR ); ASSERT( conn->errcode == WSAENOTSOCK ); return 0; }
void test_example_awesome() { bcon_error_t ret; bson b[1]; /* JSON {"BSON": ["awesome", 5.05, 1986]} */ bcon awesome[] = { "BSON", "[", "awesome", BF(5.05), BI(1986), "]", BEND }; test_bson_from_bcon( awesome, BCON_OK, BSON_VALID ); if (verbose ) printf("\t--------\n"); bson_init( b ); bson_append_start_array( b, "BSON" ); bson_append_string( b, "0", "awesome" ); bson_append_double( b, "1", 5.05 ); bson_append_int( b, "2", 1986 ); bson_append_finish_array( b ); ret = bson_finish( b ); if ( verbose ) bson_print( b ); bson_destroy( b ); }
/* Tutorial driver: connect to a local mongod, insert one person document,
 * then run the tutorial batch/query/update/index examples. */
int main() {
    mongo conn[1];
    bson doc[1];

    if ( mongo_client( conn, "127.0.0.1", 27017 ) != MONGO_OK ) {
        /* Report the well-known connection failures; any other error code
         * falls through and the tutorial proceeds, as before. */
        if ( conn->err == MONGO_CONN_NO_SOCKET ) {
            printf( "no socket\n" );
            return 1;
        }
        if ( conn->err == MONGO_CONN_FAIL ) {
            printf( "connection failed\n" );
            return 1;
        }
        if ( conn->err == MONGO_CONN_NOT_MASTER ) {
            printf( "not master\n" );
            return 1;
        }
    }

    bson_init( doc );
    bson_append_new_oid( doc, "_id" );
    bson_append_string( doc, "name", "Joe" );
    bson_append_int( doc, "age", 33 );
    bson_finish( doc );
    mongo_insert( conn, "tutorial.persons", doc, 0 );
    bson_destroy( doc );

    tutorial_insert_batch( conn );
    tutorial_empty_query( conn );
    tutorial_simple_query( conn );
    tutorial_update( conn );
    tutorial_index( conn );

    mongo_destroy( conn );
    return 0;
}
/* Fetch chunk `n` of gfile into *out, querying the chunks collection by the
 * file's _id and the chunk index.  If the chunk is not found, *out is set
 * to an empty document instead of being left undefined. */
MONGO_EXPORT void gridfile_get_chunk( gridfile *gfile, int n, bson* out ) {
    bson query;
    bson_iterator it;
    bson_oid_t file_id;
    int found;

    bson_init( &query );
    bson_find( &it, gfile->meta, "_id" );
    file_id = *bson_iterator_oid( &it );
    bson_append_oid( &query, "files_id", &file_id );
    bson_append_int( &query, "n", n );
    bson_finish( &query );

    found = ( mongo_find_one( gfile->gfs->client, gfile->gfs->chunks_ns,
                              &query, NULL, out ) == MONGO_OK );
    bson_destroy( &query );

    if ( !found ) {
        /* No such chunk: hand back an empty document rather than garbage. */
        bson empty;
        bson_empty( &empty );
        bson_copy( out, &empty );
    }
}
/*
 * Initialize the gridfs handle `gfs` for the given client, database and
 * prefix (default "fs"): allocate and own copies of the dbname, prefix and
 * the derived <db>.<prefix>.files / <db>.<prefix>.chunks namespaces, then
 * create the supporting indexes.  Returns MONGO_OK, or MONGO_ERROR after
 * releasing all allocations if either index cannot be created.
 */
int gridfs_init( mongo *client, const char *dbname, const char *prefix, gridfs *gfs ) {

    int options;
    bson b;
    bson_bool_t success;

    gfs->client = client;

    /* Allocate space to own the dbname */
    gfs->dbname = ( const char * )bson_malloc( strlen( dbname )+1 );
    strcpy( ( char * )gfs->dbname, dbname );

    /* Allocate space to own the prefix */
    if ( prefix == NULL ) prefix = "fs";
    gfs->prefix = ( const char * )bson_malloc( strlen( prefix )+1 );
    strcpy( ( char * )gfs->prefix, prefix );

    /* Allocate space to own files_ns: "<db>.<prefix>.files" */
    gfs->files_ns =
        ( const char * ) bson_malloc ( strlen( prefix )+strlen( dbname )+strlen( ".files" )+2 );
    strcpy( ( char * )gfs->files_ns, dbname );
    strcat( ( char * )gfs->files_ns, "." );
    strcat( ( char * )gfs->files_ns, prefix );
    strcat( ( char * )gfs->files_ns, ".files" );

    /* Allocate space to own chunks_ns: "<db>.<prefix>.chunks" */
    gfs->chunks_ns = ( const char * ) bson_malloc( strlen( prefix ) + strlen( dbname )
                     + strlen( ".chunks" ) + 2 );
    strcpy( ( char * )gfs->chunks_ns, dbname );
    strcat( ( char * )gfs->chunks_ns, "." );
    strcat( ( char * )gfs->chunks_ns, prefix );
    strcat( ( char * )gfs->chunks_ns, ".chunks" );

    /* Index the files collection on filename. */
    bson_init( &b );
    bson_append_int( &b, "filename", 1 );
    bson_finish( &b );
    options = 0;
    success = ( mongo_create_index( gfs->client, gfs->files_ns, &b, options, NULL ) == MONGO_OK );
    bson_destroy( &b );
    if ( !success )
        goto error;

    /* Unique index on (files_id, n) so chunks can be located and ordered. */
    bson_init( &b );
    bson_append_int( &b, "files_id", 1 );
    bson_append_int( &b, "n", 1 );
    bson_finish( &b );
    options = MONGO_INDEX_UNIQUE;
    success = ( mongo_create_index( gfs->client, gfs->chunks_ns, &b, options, NULL ) == MONGO_OK );
    bson_destroy( &b );
    if ( !success )
        goto error;

    return MONGO_OK;

error:
    /* Release everything allocated above.  (This cleanup block used to be
     * duplicated verbatim on both failure paths.) */
    bson_free( ( char * )gfs->dbname );
    bson_free( ( char * )gfs->prefix );
    bson_free( ( char * )gfs->files_ns );
    bson_free( ( char * )gfs->chunks_ns );
    return MONGO_ERROR;
}