void test_large() { mongo_connection conn[1]; mongo_connection_options opts; gridfs gfs[1]; gridfile gfile[1]; FILE *fd; int i, n; char buffer[LARGE]; int64_t filesize = (int64_t)1024 * (int64_t)LARGE; srand(time(NULL)); INIT_SOCKETS_FOR_WINDOWS; strncpy(opts.host, "127.0.0.1", 255); opts.host[254] = '\0'; opts.port = 27017; if (mongo_connect( conn , &opts )) { printf("failed to connect\n"); exit(1); } gridfs_init(conn, "test", "fs", gfs); /* Create a very large file */ fill_buffer_randomly(buffer, (int64_t)LARGE); fd = fopen("bigfile", "w"); for(i=0; i<1024; i++) { fwrite(buffer, 1, LARGE, fd); } fclose(fd); /* Now read the file into GridFS */ gridfs_store_file(gfs, "bigfile", "bigfile", "text/html"); gridfs_find_filename(gfs, "bigfile", gfile); ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 ); ASSERT( gridfile_get_contentlength( gfile ) == filesize ); /* Read the file using the streaming interface */ gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html"); fd = fopen("bigfile", "r"); while((n = fread(buffer, 1, 1024, fd)) != 0) { gridfile_write_buffer(gfile, buffer, n); } gridfile_writer_done( gfile ); gridfs_find_filename(gfs, "bigfile-stream", gfile); ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 ); ASSERT( gridfile_get_contentlength( gfile ) == filesize ); gridfs_destroy(gfs); mongo_destroy(conn); }
void test_large() { mongo_connection conn[1]; gridfs gfs[1]; gridfile gfile[1]; FILE *fd; int i, n; char *buffer = malloc( LARGE ); if( buffer == NULL ) { printf("Failed to allocate memory."); exit(1); } uint64_t filesize = (uint64_t)1024 * (uint64_t)LARGE; srand(time(NULL)); INIT_SOCKETS_FOR_WINDOWS; if (mongo_connect( conn, TEST_SERVER, 27017 )){ printf("failed to connect 1\n"); exit(1); } gridfs_init(conn, "test", "fs", gfs); /* Create a very large file */ fill_buffer_randomly(buffer, (uint64_t)LARGE); fd = fopen("bigfile", "w"); for(i=0; i<1024; i++) { fwrite(buffer, 1, LARGE, fd); } fclose(fd); /* Now read the file into GridFS */ gridfs_store_file(gfs, "bigfile", "bigfile", "text/html"); gridfs_find_filename(gfs, "bigfile", gfile); ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 ); ASSERT( gridfile_get_contentlength( gfile ) == filesize ); /* Read the file using the streaming interface */ gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html"); fd = fopen("bigfile", "r"); while((n = fread(buffer, 1, 1024, fd)) != 0) { gridfile_write_buffer(gfile, buffer, n); } gridfile_writer_done( gfile ); gridfs_find_filename(gfs, "bigfile-stream", gfile); ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 ); ASSERT( gridfile_get_contentlength( gfile ) == filesize ); gridfs_destroy(gfs); mongo_disconnect(conn); mongo_destroy(conn); }
void test_basic( void ) { mongo conn[1]; gridfs gfs[1]; char *data_before = (char*)bson_malloc( UPPER ); int64_t i; FILE *fd; srand((unsigned int) time( NULL ) ); INIT_SOCKETS_FOR_WINDOWS; if ( mongo_client( conn, TEST_SERVER, 27017 ) ) { printf( "failed to connect 2\n" ); exit( 1 ); } gridfs_init( conn, "test", "fs", gfs ); fill_buffer_randomly( data_before, UPPER ); for ( i = LOWER; i <= UPPER; i += DELTA ) { /* Input from buffer */ gridfs_store_buffer( gfs, data_before, i, "input-buffer", "text/html", GRIDFILE_COMPRESS ); test_gridfile( gfs, data_before, i, "input-buffer", "text/html" ); /* Input from file */ fd = fopen( "input-file", "w" ); fwrite( data_before, sizeof( char ), (size_t)i, fd ); fclose( fd ); gridfs_store_file( gfs, "input-file", "input-file", "text/html", GRIDFILE_DEFAULT ); test_gridfile( gfs, data_before, i, "input-file", "text/html" ); gfs->caseInsensitive = 1; gridfs_store_file( gfs, "input-file", "input-file", "text/html", GRIDFILE_DEFAULT ); test_gridfile( gfs, data_before, i, "inPut-file", "text/html" ); } gridfs_destroy( gfs ); mongo_disconnect( conn ); mongo_destroy( conn ); free( data_before ); /* Clean up files. */ _unlink( "input-file" ); _unlink( "output" ); }
/* R-callable wrapper around gridfs_store_file().
 * Arguments are R character vectors (first element used); returns an
 * R logical scalar: TRUE when the store succeeded (MONGO_OK). */
SEXP mongo_gridfs_store_file(SEXP gfs, SEXP filename, SEXP remotename, SEXP contenttype) {
    gridfs* _gfs = _checkGridfs(gfs);
    const char* local_path   = CHAR(STRING_ELT(filename, 0));
    const char* stored_name  = CHAR(STRING_ELT(remotename, 0));
    const char* content_type = CHAR(STRING_ELT(contenttype, 0));
    int success;
    SEXP ret;

    /* Perform the store before allocating the R result vector. */
    success = (gridfs_store_file(_gfs, local_path, stored_name, content_type) == MONGO_OK);

    PROTECT(ret = allocVector(LGLSXP, 1));
    LOGICAL(ret)[0] = success;
    UNPROTECT(1);
    return ret;
}
int main(void) { mongo_connection conn[1]; mongo_connection_options opts; gridfs gfs[1]; char data_before[UPPER]; size_t i; FILE *fd; srand(time(NULL)); INIT_SOCKETS_FOR_WINDOWS; /* strncpy(opts.host, TEST_SERVER, 255);*/ strncpy(opts.host, "127.0.0.1", 255); opts.host[254] = '\0'; opts.port = 27017; if (mongo_connect( conn , &opts )){ printf("failed to connect\n"); exit(1); } gridfs_init(conn, "test", "fs", gfs); for (i = LOWER; i <= UPPER; i+=DELTA) { fill_buffer_randomly(data_before, i); /* Input from buffer */ gridfs_store_buffer(gfs, data_before, i, "input-buffer", "text/html"); test_gridfile(gfs, data_before, i, "input-buffer", "text/html"); /* Input from file */ fd = fopen("input-file", "w"); fwrite(data_before, sizeof(char), i, fd); fclose(fd); gridfs_store_file(gfs, "input-file", "input-file", "text/html"); test_gridfile(gfs, data_before, i, "input-file", "text/html"); } gridfs_destroy(gfs); mongo_cmd_drop_db(conn, "test"); mongo_destroy(conn); return 0; }
void test_basic() { mongo_connection conn[1]; gridfs gfs[1]; char *data_before = malloc( UPPER ); if( data_before == NULL ) { printf("Failed to allocate"); exit(1); } uint64_t i; FILE *fd; srand(time(NULL)); INIT_SOCKETS_FOR_WINDOWS; if (mongo_connect( conn, TEST_SERVER, 27017 )){ printf("failed to connect 2\n"); exit(1); } gridfs_init(conn, "test", "fs", gfs); fill_buffer_randomly( data_before, UPPER ); for (i = LOWER; i <= UPPER; i += DELTA) { /* Input from buffer */ gridfs_store_buffer(gfs, data_before, i, "input-buffer", "text/html"); test_gridfile(gfs, data_before, i, "input-buffer", "text/html"); /* Input from file */ fd = fopen("input-file", "w"); fwrite(data_before, sizeof(char), i, fd); fclose(fd); gridfs_store_file(gfs, "input-file", "input-file", "text/html"); test_gridfile(gfs, data_before, i, "input-file", "text/html"); } gridfs_destroy(gfs); mongo_disconnect(conn); mongo_destroy(conn); free( data_before ); }
/* Large-file GridFS test with explicit write-concern control.
 * Stores a 1024 * LARGE byte file via gridfs_store_file(), verifies it by
 * reading it back in MEDIUM-sized chunks, then re-stores it via the
 * streaming writer while periodically forcing a journaled getLastError.
 * NOTE(review): the write-concern juggling (j=1 -> j=0 -> j=1) around the
 * streaming phase appears intended to avoid a journaled round-trip on every
 * chunk write while still flushing every 10th iteration — confirm against
 * driver docs before changing the sequencing. */
static void test_large( void ) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    size_t i, n;
    char *buffer = (char*)bson_malloc( LARGE );
    char *read_buf = (char*)bson_malloc( LARGE );
    gridfs_offset filesize = ( int64_t )1024 * ( int64_t )LARGE;
    mongo_write_concern wc;
    bson lastError;
    bson lastErrorCmd;

    srand( (unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;

    /* Start with journaled writes (j=1) for the bulk store below. */
    mongo_write_concern_init(&wc);
    wc.j = 1;
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    GFS_INIT;

    /* Reuse "bigfile" from a previous run if present; otherwise build it. */
    fd = fopen( "bigfile", "r" );
    if( fd ) {
        fclose( fd );
    } else {
        /* Create a very large file */
        fill_buffer_randomly( buffer, ( int64_t )LARGE );
        fd = fopen( "bigfile", "w" );
        for( i=0; i<1024; i++ ) {
            fwrite( buffer, 1, LARGE, fd );
        }
        fclose( fd );
    }

    /* Now read the file into GridFS */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_store_file( gfs, "bigfile", "bigfile", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS);

    gridfs_find_filename( gfs, "bigfile", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );

    /* Compare the GridFS copy chunk-by-chunk against the local file. */
    fd = fopen( "bigfile", "r" );
    while( ( n = fread( buffer, 1, MEDIUM, fd ) ) != 0 ) {
        ASSERT( gridfile_read_buffer( gfile, read_buf, MEDIUM ) == n );
        ASSERT( memcmp( buffer, read_buf, n ) == 0 );
    }
    fclose( fd );

    gridfile_destroy( gfile );

    /* Read the file using the streaming interface */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_remove_filename( gfs, "bigfile-stream" );
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS );

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 0; /* Let's reset write concern j field to zero, we will manually call getLastError with j = 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fd = fopen( "bigfile", "r" );
    i = 0;
    while( ( n = fread( buffer, 1, READ_WRITE_BUF_SIZE, fd ) ) != 0 ) {
        ASSERT( gridfile_write_buffer( gfile, buffer, n ) == n );
        /* Every 10th chunk, force a journaled flush with an explicit
         * getLastError { j: 1 } command. */
        if(i++ % 10 == 0) {
            bson_init( &lastErrorCmd );
            bson_append_int( &lastErrorCmd, "getLastError", 1);
            bson_append_int( &lastErrorCmd, "j", 1);
            bson_finish( &lastErrorCmd );

            bson_init( &lastError );
            mongo_run_command( conn, "test", &lastErrorCmd, &lastError );

            bson_destroy( &lastError );
            bson_destroy( &lastErrorCmd );
        }
    }

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 1; /* Let's reset write concern j field to 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fclose( fd );
    gridfile_writer_done( gfile );

    gridfs_find_filename( gfs, "bigfile-stream", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );

    gridfs_remove_filename( gfs, "bigfile-stream" );
    gridfs_destroy( gfs );
    mongo_disconnect( conn );
    mongo_destroy( conn );
    bson_free( buffer );
    bson_free( read_buf );
    mongo_write_concern_destroy( &wc );
}
static void test_random_write(void) { mongo conn[1]; gridfs gfs[1]; gridfile* gfile; char *data_before = (char*)bson_malloc( UPPER ); char *random_data = (char*)bson_malloc( UPPER ); char *buf = (char*) bson_malloc( UPPER ); int64_t i; FILE *fd; srand((unsigned int) time( NULL ) ); INIT_SOCKETS_FOR_WINDOWS; CONN_CLIENT_TEST; GFS_INIT; fill_buffer_randomly( data_before, UPPER ); fill_buffer_randomly( random_data, UPPER ); for ( i = LOWER; i <= UPPER; i += DELTA ) { int64_t j = i / 2 - 3; gridfs_offset bytes_to_write_first; int n; /* Input from buffer */ gridfs_store_buffer( gfs, data_before, i, "input-buffer", "text/html", GRIDFILE_DEFAULT ); if ( i > DEFAULT_CHUNK_SIZE * 4 ) { n = DEFAULT_CHUNK_SIZE * 3 + 6; memcpy(&data_before[j], random_data, n); // Let's overwrite the buffer with bytes crossing multiple chunks bytes_to_write_first = 10; } else { n = 6; memcpy(random_data, "123456", n); strncpy(&data_before[j], random_data, n); // Let's overwrite the buffer with a few some bytes bytes_to_write_first = 0; } gfile = gridfile_create(); ASSERT(gridfs_find_filename(gfs, "input-buffer", gfile) == 0); gridfile_writer_init(gfile, gfs, "input-buffer", "text/html", GRIDFILE_DEFAULT ); gridfile_seek(gfile, j); // Seek into the same buffer position within the GridFS file if ( bytes_to_write_first ) { ASSERT( gridfile_write_buffer(gfile, random_data, bytes_to_write_first) == bytes_to_write_first ); // Let's write 10 bytes first, and later the rest } ASSERT( gridfile_write_buffer(gfile, &random_data[bytes_to_write_first], n - bytes_to_write_first) == n - bytes_to_write_first ); // Try to write to the existing GridFS file on the position given by j gridfile_seek(gfile, j); gridfile_read_buffer( gfile, buf, n ); ASSERT(memcmp( buf, &data_before[j], n) == 0); gridfile_writer_done(gfile); ASSERT(gfile->pos == (gridfs_offset)(j + n)); gridfile_dealloc(gfile); test_gridfile( gfs, data_before, j + n > i ? 
j + n : i, "input-buffer", "text/html" ); /* Input from file */ fd = fopen( "input-file", "w" ); fwrite( data_before, sizeof( char ), (size_t) (j + n > i ? j + n : i), fd ); fclose( fd ); gridfs_store_file( gfs, "input-file", "input-file", "text/html", GRIDFILE_DEFAULT ); test_gridfile( gfs, data_before, j + n > i ? j + n : i, "input-file", "text/html" ); } gridfs_destroy( gfs ); mongo_disconnect( conn ); mongo_destroy( conn ); free( data_before ); free( random_data ); free( buf ); /* Clean up files. */ gridfs_test_unlink( "input-file" ); gridfs_test_unlink( "output" ); }
/* Thin exported wrapper around gridfs_store_file().
 * Returns 1 when the upload succeeds (MONGO_OK), 0 otherwise. */
EXPORT int mongo_gridfs_store_file(struct gridfs_* gfs, char* filename, char* remoteName, char* contentType) {
    int status = gridfs_store_file((gridfs*)gfs, filename, remoteName, contentType);
    return status == MONGO_OK ? 1 : 0;
}