/* Run the single-field command { <cmdstr>: <arg> } (string-valued) against
 * db and report success from the "ok" field of the server reply.
 * If realout is non-NULL the caller takes ownership of the raw reply;
 * otherwise it is freed here. Returns MONGO_OK or MONGO_ERROR. */
int mongo_simple_str_command(mongo_connection * conn, const char * db, const char* cmdstr, const char* arg, bson * realout) {
    bson_buffer bb;
    bson cmd;
    bson out;
    int ok = 0;

    /* Build the one-field command document. */
    bson_buffer_init(&bb);
    bson_append_string(&bb, cmdstr, arg);
    bson_from_buffer(&cmd, &bb);

    if (mongo_run_command(conn, db, &cmd, &out) == MONGO_OK) {
        bson_iterator it;
        if (bson_find(&it, &out, "ok"))
            ok = bson_iterator_bool(&it);
    }

    bson_destroy(&cmd);

    /* Hand the reply to the caller if requested, otherwise release it. */
    if (realout)
        *realout = out;
    else
        bson_destroy(&out);

    return ok ? MONGO_OK : MONGO_ERROR;
}
/* Run the single-field command { <cmdstr>: <arg> } (int-valued) against db.
 * Success is judged by the "ok" field of the reply. If realout is non-NULL
 * the caller owns the raw reply; otherwise it is freed here. On failure
 * conn->err is set to MONGO_COMMAND_FAILED. */
int mongo_simple_int_command( mongo *conn, const char *db, const char *cmdstr, int arg, bson *realout ) {
    bson cmd;
    bson reply = {NULL, 0};
    bson_bool_t ok = 0;

    bson_init( &cmd );
    bson_append_int( &cmd, cmdstr, arg );
    bson_finish( &cmd );

    if( mongo_run_command( conn, db, &cmd, &reply ) == MONGO_OK ) {
        bson_iterator it;
        if( bson_find( &it, &reply, "ok" ) )
            ok = bson_iterator_bool( &it );
    }

    bson_destroy( &cmd );

    /* Transfer or release the server reply. */
    if ( realout )
        *realout = reply;
    else
        bson_destroy( &reply );

    if( ok )
        return MONGO_OK;

    conn->err = MONGO_COMMAND_FAILED;
    return MONGO_ERROR;
}
/* Count the documents in db.ns matching query (old buffer-based API).
 *
 * Returns the server-reported count, or -1 if the reply carried no "n"
 * field. Connection-level failures propagate through the driver's
 * MONGO_TRY/MONGO_RETHROW exception machinery.
 *
 * Fix: `out` was previously left uninitialized, yet bson_destroy(&out)
 * ran even when mongo_run_command() returned false without throwing —
 * freeing a garbage pointer. Zero it up front so destroying an unfilled
 * reply is safe, and also release it on the rethrow path. */
int64_t mongo_count(mongo_connection* conn, const char* db, const char* ns, bson* query){
    bson_buffer bb;
    bson cmd;
    bson out;
    int64_t count = -1;

    memset(&out, 0, sizeof(out));

    bson_buffer_init(&bb);
    bson_append_string(&bb, "count", ns);
    if (query && bson_size(query) > 5) /* not empty */
        bson_append_bson(&bb, "query", query);
    bson_from_buffer(&cmd, &bb);

    MONGO_TRY{
        if(mongo_run_command(conn, db, &cmd, &out)){
            bson_iterator it;
            if(bson_find(&it, &out, "n"))
                count = bson_iterator_long(&it);
        }
    }MONGO_CATCH{
        bson_destroy(&cmd);
        bson_destroy(&out); /* free any partial reply before rethrowing */
        MONGO_RETHROW();
    }

    bson_destroy(&cmd);
    bson_destroy(&out);
    return count;
}
/* Count the documents in db.ns matching query.
 * Returns the server-reported count, -1 if the reply had no "n" field,
 * or MONGO_ERROR when the command itself failed. */
int64_t mongo_count( mongo *conn, const char *db, const char *ns, bson *query ) {
    bson cmd;
    bson out;
    int64_t count = -1;

    /* Zero the reply so destroying it is safe even if never filled in. */
    memset(&out, 0, sizeof(out));

    /* Build { count: <ns> [, query: <query>] }. */
    bson_init( &cmd );
    bson_append_string( &cmd, "count", ns );
    if ( query && bson_size( query ) > 5 ) /* not empty */
        bson_append_bson( &cmd, "query", query );
    bson_finish( &cmd );

    if( mongo_run_command( conn, db, &cmd, &out ) != MONGO_OK ) {
        bson_destroy( &out );
        bson_destroy( &cmd );
        return MONGO_ERROR;
    }

    {
        bson_iterator it;
        if( bson_find( &it, &out, "n" ) )
            count = bson_iterator_long( &it );
    }

    bson_destroy( &cmd );
    bson_destroy( &out );
    return count;
}
// Execute a database command against this connection's namespace.
// Must be invoked asynchronously (ac != NULL). On failure both the reply
// and the command document are released and the connection error is
// surfaced; on success the caller owns `out`.
result_t MongoDB::_runCommand(bson *command, bson &out, AsyncEvent* ac)
{
    if (!ac)
        return CHECK_ERROR(CALL_E_NOSYNC);

    int rc = mongo_run_command(&m_conn, m_ns.c_str(), command, &out);
    if (rc != MONGO_OK)
    {
        bson_destroy(&out);
        bson_destroy(command);
        return CHECK_ERROR(error());
    }

    return 0;
}
/* Test helper: create the capped "cursors" collection (1 MB) in db "test". */
void create_capped_collection( mongo *conn ) {
    bson spec;

    /* { create: "cursors", capped: true, size: 1000000 } */
    bson_init( &spec );
    bson_append_string( &spec, "create", "cursors" );
    bson_append_bool( &spec, "capped", 1 );
    bson_append_int( &spec, "size", 1000000 );
    bson_finish( &spec );

    ASSERT( mongo_run_command( conn, "test", &spec, NULL ) == MONGO_OK );

    bson_destroy( &spec );
}
/* R binding: run an arbitrary database command on the given connection
 * and return the server reply as an R object, or NULL on failure. */
SEXP mongo_command(SEXP mongo_conn, SEXP db, SEXP command) {
    mongo* conn = _checkMongo(mongo_conn);
    const char* _db = CHAR(STRING_ELT(db, 0));
    bson* _command = _checkBSON(command);
    bson out;
    if (mongo_run_command(conn, _db, _command, &out) != MONGO_OK) {
        /* Nothing has been PROTECTed on this path, so the bare return
           keeps the protection stack balanced. */
        return R_NilValue;
    }
    SEXP ret = _mongo_bson_create(&out);
    bson_destroy(&out);
    /* NOTE(review): the 3 unprotects presumably match PROTECT calls made
       inside _mongo_bson_create — confirm against that helper. */
    UNPROTECT(3);
    return ret;
}
/* Authenticate against db using the MongoDB nonce/MD5 handshake:
 * getnonce, then { authenticate: 1, user, nonce, key: md5(nonce+user+digest) }.
 * Returns MONGO_OK on success, MONGO_ERROR otherwise.
 *
 * Fixes:
 *  - bson_destroy(&from_db) was called twice (double free of the getnonce
 *    reply); it is now destroyed exactly once, after `nonce` — which points
 *    into it — is no longer needed.
 *  - the authenticate reply `out` was never destroyed (leak) and was
 *    uninitialized; it is zero-initialized and released unconditionally,
 *    matching the driver's other command helpers. */
bson_bool_t mongo_cmd_authenticate( mongo *conn, const char *db, const char *user, const char *pass ) {
    bson from_db;
    bson cmd;
    bson out = {NULL, 0};
    const char *nonce;
    bson_bool_t success = 0;
    mongo_md5_state_t st;
    mongo_md5_byte_t digest[16];
    char hex_digest[33];

    /* Fetch the server nonce. */
    if( mongo_simple_int_command( conn, db, "getnonce", 1, &from_db ) == MONGO_OK ) {
        bson_iterator it;
        bson_find( &it, &from_db, "nonce" );
        nonce = bson_iterator_string( &it );
    }
    else {
        return MONGO_ERROR;
    }

    /* key = md5( nonce + user + md5(user:mongo:pass) ) as hex. */
    mongo_pass_digest( user, pass, hex_digest );

    mongo_md5_init( &st );
    mongo_md5_append( &st, ( const mongo_md5_byte_t * )nonce, strlen( nonce ) );
    mongo_md5_append( &st, ( const mongo_md5_byte_t * )user, strlen( user ) );
    mongo_md5_append( &st, ( const mongo_md5_byte_t * )hex_digest, 32 );
    mongo_md5_finish( &st, digest );
    digest2hex( digest, hex_digest );

    bson_init( &cmd );
    bson_append_int( &cmd, "authenticate", 1 );
    bson_append_string( &cmd, "user", user );
    bson_append_string( &cmd, "nonce", nonce );
    bson_append_string( &cmd, "key", hex_digest );
    bson_finish( &cmd );

    /* nonce pointed into from_db; the command document now has its own
       copy, so the getnonce reply can be released — exactly once. */
    bson_destroy( &from_db );

    if( mongo_run_command( conn, db, &cmd, &out ) == MONGO_OK ) {
        bson_iterator it;
        if( bson_find( &it, &out, "ok" ) )
            success = bson_iterator_bool( &it );
    }

    bson_destroy( &out );
    bson_destroy( &cmd );

    if( success )
        return MONGO_OK;
    else
        return MONGO_ERROR;
}
/* R binding: rename a collection via the admin "renameCollection" command.
 * Returns an R logical: TRUE when the server reported success. */
SEXP mongo_rename(SEXP mongo_conn, SEXP from_ns, SEXP to_ns) {
    mongo* conn = _checkMongo(mongo_conn);
    const char* src = CHAR(STRING_ELT(from_ns, 0));
    const char* dst = CHAR(STRING_ELT(to_ns, 0));
    bson cmd;
    SEXP ans;

    /* { renameCollection: <src>, to: <dst> } must run against "admin". */
    bson_init(&cmd);
    bson_append_string(&cmd, "renameCollection", src);
    bson_append_string(&cmd, "to", dst);
    bson_finish(&cmd);

    PROTECT(ans = allocVector(LGLSXP, 1));
    LOGICAL(ans)[0] = (mongo_run_command(conn, "admin", &cmd, NULL) == MONGO_OK);

    bson_destroy(&cmd);
    UNPROTECT(1);
    return ans;
}
/* Integration test: create a capped collection and verify the metadata
 * that "collstats" reports for it, across server-version differences. */
int main() {
    mongo conn[1];
    bson cmd[1];
    bson out[1];
    bson_iterator it[1];
    char version[10];
    const char *db = "test";
    const char *col = "c.capped";

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_connect( conn , TEST_SERVER , 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    /* Start clean, then create a small capped collection
       (size 1024 bytes, max 100 documents). */
    mongo_cmd_drop_collection( conn, db, col, NULL );
    ASSERT( mongo_create_capped_collection( conn, db, col, 1024, 100, NULL ) == MONGO_OK );

    /* Ask the server for the collection's stats. */
    bson_init( cmd );
    bson_append_string( cmd, "collstats", col );
    bson_finish( cmd );
    ASSERT( mongo_run_command( conn, db, cmd, out ) == MONGO_OK );

    /* Servers 2.1+ report "capped" as a bool; older ones as an int.
       bson_find returns the BSON type of the located field. */
    if( mongo_get_server_version( version ) != -1 ){
        if( version[0] == '2' && version[2] >= '1' )
            ASSERT( bson_find( it, out, "capped" ) == BSON_BOOL );
        else
            ASSERT( bson_find( it, out, "capped" ) == BSON_INT );
    }
    ASSERT( bson_find( it, out, "max" ) == BSON_INT );

    bson_destroy( cmd );
    bson_destroy( out );

    /* Clean up the test collection and database. */
    mongo_cmd_drop_collection( conn, "test", col, NULL );
    mongo_cmd_drop_db( conn, db );
    mongo_destroy( conn );
    return 0;
}
/* Finalize a GridFS upload: ask the server for the file's md5 via the
 * "filemd5" command, then insert the metadata document into the files
 * collection. Returns mongo_run_command's / mongo_insert's status. */
static int gridfs_insert_file2( gridfs *gfs, const char *name, const bson_oid_t id, gridfs_offset length, const char *contenttype, gridfile* gfile ) {
    bson command;
    bson ret;
    bson res;
    bson_iterator it;
    int result;
    int64_t d;

    /* Check run md5 */
    bson_init( &command );
    bson_append_oid( &command, "filemd5", &id );
    bson_append_string( &command, "root", gfs->prefix );
    bson_finish( &command );
    result = mongo_run_command( gfs->client, gfs->dbname, &command, &res );
    bson_destroy( &command );
    /* Propagate the command failure to the caller. NOTE(review): `res` is
       assumed to hold nothing needing bson_destroy on this path — confirm
       against mongo_run_command's failure contract. */
    if (result != MONGO_OK)
        return result;

    /* Create and insert BSON for file metadata */
    bson_init( &ret );
    bson_append_oid( &ret, "_id", &id );
    if ( name != NULL && *name != '\0' ) {
        bson_append_string( &ret, "filename", name );
    }
    bson_append_long( &ret, "length", length );
    bson_append_int( &ret, "chunkSize", DEFAULT_CHUNK_SIZE );
    /* uploadDate is milliseconds since the epoch. */
    d = ( bson_date_t )1000*time( NULL );
    bson_append_date( &ret, "uploadDate", d);
    /* Copy the server-computed md5 out of the filemd5 reply; assumes the
       field is present whenever the command succeeded. */
    bson_find( &it, &res, "md5" );
    bson_append_string( &ret, "md5", bson_iterator_string( &it ) );
    bson_destroy( &res );
    if ( contenttype != NULL && *contenttype != '\0' ) {
        bson_append_string( &ret, "contentType", contenttype );
    }
    bson_append_bson(&ret, "metadata", gfile->meta);
    bson_finish( &ret );

    result = mongo_insert( gfs->client, gfs->files_ns, &ret );
    bson_destroy( &ret );
    return result;
}
/* Example: use the aggregation framework to count distinct "list" values.
 *
 * Fix: the result of mongo_run_command() was ignored, and b_result — which
 * is only filled in on success — was printed and destroyed even when the
 * command failed (undefined behavior on an uninitialized bson). The reply
 * is now only touched when the command succeeded. */
int main() {
    /*
     * We assume objects in the form of {_id:<any_id>, list:[{a:<int>,b:<int>}, ...]}
     */
    mongo conn[1];
    mongo_init(conn);
    if(MONGO_OK != mongo_client(conn, "127.0.0.1", 27017))
        return 1;

    bson b[1], b_result[1];

    /* create the aggregation command in bson:
       { aggregate: "agg",
         pipeline: [ { $unwind: "$list" },
                     { $group: { _id: "$list", distinct_count: { $sum: 1 } } } ] } */
    bson_init(b);
    bson_append_string(b, "aggregate", "agg");
    bson_append_start_array(b, "pipeline");
    bson_append_start_object(b, "0");
    bson_append_string(b, "$unwind", "$list");
    bson_append_finish_object(b);
    bson_append_start_object(b, "1");
    bson_append_start_object(b, "$group");
    bson_append_string(b, "_id", "$list");
    bson_append_start_object(b, "distinct_count");
    bson_append_int(b, "$sum", 1);
    bson_append_finish_object(b);
    bson_append_finish_object(b);
    bson_append_finish_object(b);
    bson_append_finish_array(b);
    bson_finish(b);

    /*So you can see your command*/
    bson_print(b);

    /* run the command; the reply is only valid on success */
    if (mongo_run_command(conn, "test", b, b_result) == MONGO_OK) {
        /*command results*/
        bson_print(b_result);
        bson_destroy(b_result);
    }

    bson_destroy(b);
    mongo_destroy(conn);
    return 0;
}
/* Run the single-field command { <cmdstr>: <arg> } (string-valued) against
 * db and return mongo_run_command's status. If realout is non-NULL the
 * caller takes ownership of the raw reply; otherwise it is freed here. */
MONGO_EXPORT int mongo_simple_str_command( mongo *conn, const char *db, const char *cmdstr, const char *arg, bson *realout ) {
    bson cmd;
    bson reply = {NULL, 0};
    int status;

    bson_init( &cmd );
    bson_append_string( &cmd, cmdstr, arg );
    bson_finish( &cmd );

    status = mongo_run_command( conn, db, &cmd, &reply );
    bson_destroy( &cmd );

    /* Transfer the reply to the caller, or release it. */
    if ( realout )
        *realout = reply;
    else
        bson_destroy( &reply );

    return status;
}
/* Finalize a GridFS upload (old buffer API): ask the server for the file's
 * md5 via "filemd5", then insert the metadata document into the files
 * collection. Returns the inserted metadata document; the caller owns it.
 *
 * Fixes:
 *  - "length" was appended with bson_append_int, truncating gridfs_offset
 *    (64-bit) for files of 2 GiB or more; it now uses bson_append_long,
 *    matching the GridFS spec and later driver versions.
 *  - mongo_run_command() was invoked inside assert(), so the filemd5
 *    command was compiled out entirely in NDEBUG builds; the command now
 *    always runs and only the check is assert-guarded. */
static bson gridfs_insert_file( gridfs* gfs, const char* name,
                                const bson_oid_t id, gridfs_offset length,
                                const char* contenttype) {
  bson command;
  bson res;
  bson ret;
  bson_buffer buf;
  bson_iterator it;
  bson_bool_t md5_ok;

  /* Check run md5 */
  bson_buffer_init(&buf);
  bson_append_oid(&buf, "filemd5", &id);
  bson_append_string(&buf, "root", gfs->prefix);
  bson_from_buffer(&command, &buf);
  md5_ok = mongo_run_command(gfs->client, gfs->dbname, &command, &res);
  assert(md5_ok);
  (void)md5_ok; /* silence unused-variable warning under NDEBUG */
  bson_destroy(&command);

  /* Create and insert BSON for file metadata */
  bson_buffer_init(&buf);
  bson_append_oid(&buf, "_id", &id);
  if (name != NULL && *name != '\0') {
    bson_append_string(&buf, "filename", name);
  }
  /* 64-bit length: was bson_append_int, which truncated large files. */
  bson_append_long(&buf, "length", length);
  bson_append_int(&buf, "chunkSize", DEFAULT_CHUNK_SIZE);
  /* uploadDate is milliseconds since the epoch. */
  bson_append_date(&buf, "uploadDate", (bson_date_t)1000*time(NULL));
  /* Copy the server-computed md5 out of the filemd5 reply. */
  bson_find(&it, &res, "md5");
  bson_append_string(&buf, "md5", bson_iterator_string(&it));
  bson_destroy(&res);
  if (contenttype != NULL && *contenttype != '\0') {
    bson_append_string(&buf, "contentType", contenttype);
  }
  bson_from_buffer(&ret, &buf);
  mongo_insert(gfs->client, gfs->files_ns, &ret);
  return ret;
}
/* Stress test: store and read back a very large (1024 * LARGE bytes) file
 * through GridFS, first with the whole-file API and then with the
 * streaming writer, periodically forcing journal acknowledgement via a
 * manual getLastError { j: 1 } command. */
static void test_large( void ) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    size_t i, n;
    char *buffer = (char*)bson_malloc( LARGE );
    char *read_buf = (char*)bson_malloc( LARGE );
    gridfs_offset filesize = ( int64_t )1024 * ( int64_t )LARGE;
    mongo_write_concern wc;
    bson lastError;
    bson lastErrorCmd;

    srand( (unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;

    /* Start with journaled writes (j = 1). */
    mongo_write_concern_init(&wc);
    wc.j = 1;
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    GFS_INIT;

    /* Reuse "bigfile" from a previous run if it exists. */
    fd = fopen( "bigfile", "r" );
    if( fd ) {
        fclose( fd );
    }
    else {
        /* Create a very large file */
        fill_buffer_randomly( buffer, ( int64_t )LARGE );
        fd = fopen( "bigfile", "w" );
        for( i=0; i<1024; i++ ) {
            fwrite( buffer, 1, LARGE, fd );
        }
        fclose( fd );
    }

    /* Now read the file into GridFS */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_store_file( gfs, "bigfile", "bigfile", "text/html",
                       GRIDFILE_NOMD5 | GRIDFILE_COMPRESS);

    gridfs_find_filename( gfs, "bigfile", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );

    /* Compare the stored content chunk-by-chunk against the local file. */
    fd = fopen( "bigfile", "r" );

    while( ( n = fread( buffer, 1, MEDIUM, fd ) ) != 0 ) {
        ASSERT( gridfile_read_buffer( gfile, read_buf, MEDIUM ) == n );
        ASSERT( memcmp( buffer, read_buf, n ) == 0 );
    }

    fclose( fd );
    gridfile_destroy( gfile );

    /* Read the file using the streaming interface */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_remove_filename( gfs, "bigfile-stream" );
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html",
                          GRIDFILE_NOMD5 | GRIDFILE_COMPRESS );

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 0; /* Let's reset write concern j field to zero, we will manually call getLastError with j = 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fd = fopen( "bigfile", "r" );
    i = 0;
    while( ( n = fread( buffer, 1, READ_WRITE_BUF_SIZE, fd ) ) != 0 ) {
        ASSERT( gridfile_write_buffer( gfile, buffer, n ) == n );
        /* Every 10th chunk, explicitly ask the server to journal the
           writes so far: { getLastError: 1, j: 1 }. */
        if(i++ % 10 == 0) {
            bson_init( &lastErrorCmd );
            bson_append_int( &lastErrorCmd, "getLastError", 1);
            bson_append_int( &lastErrorCmd, "j", 1);
            bson_finish( &lastErrorCmd );

            bson_init( &lastError );
            mongo_run_command( conn, "test", &lastErrorCmd, &lastError );

            bson_destroy( &lastError );
            bson_destroy( &lastErrorCmd );
        }
    }

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 1; /* Let's reset write concern j field to 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fclose( fd );
    gridfile_writer_done( gfile );

    gridfs_find_filename( gfs, "bigfile-stream", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );

    gridfs_remove_filename( gfs, "bigfile-stream" );
    gridfs_destroy( gfs );

    mongo_disconnect( conn );
    mongo_destroy( conn );

    bson_free( buffer );
    bson_free( read_buf );

    mongo_write_concern_destroy( &wc );
}