Example #1
void test_large() {
    mongo_connection conn[1];
    mongo_connection_options opts;
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    int i, n;
    char buffer[LARGE];
    int64_t filesize = (int64_t)1024 * (int64_t)LARGE;
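    /* the local test file is written as 1024 copies of the LARGE-byte buffer, hence filesize = 1024 * LARGE */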

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    strncpy(opts.host, "127.0.0.1", 255);
    opts.host[254] = '\0';
    opts.port = 27017;

    if (mongo_connect( conn , &opts )) {
        printf("failed to connect\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    /* Create a very large file */
    fill_buffer_randomly(buffer, (int64_t)LARGE);
    fd = fopen("bigfile", "w");
    for(i=0; i<1024; i++) {
        fwrite(buffer, 1, LARGE, fd);
    }
    fclose(fd);

    /* Now read the file into GridFS */
    gridfs_store_file(gfs, "bigfile", "bigfile", "text/html");

    gridfs_find_filename(gfs, "bigfile", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    /* Now store the local file again, as "bigfile-stream", via the streaming writer interface */
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html");

    fd = fopen("bigfile", "r");

    while((n = fread(buffer, 1, 1024, fd)) != 0) {
        gridfile_write_buffer(gfile, buffer, n);
    }
    gridfile_writer_done( gfile );

    gridfs_find_filename(gfs, "bigfile-stream", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    gridfs_destroy(gfs);
    mongo_destroy(conn);
}
Example #2
void test_large() {
    mongo_connection conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    int i, n;
    char *buffer = malloc( LARGE );
    if( buffer == NULL ) {
        printf("Failed to allocate memory.");
        exit(1);
    }
    uint64_t filesize = (uint64_t)1024 * (uint64_t)LARGE;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn, TEST_SERVER, 27017 )){
        printf("failed to connect 1\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    /* Create a very large file */
    fill_buffer_randomly(buffer, (uint64_t)LARGE);
    fd = fopen("bigfile", "w");
    for(i=0; i<1024; i++) {
      fwrite(buffer, 1, LARGE, fd);
    }
    fclose(fd);

    /* Now read the file into GridFS */
    gridfs_store_file(gfs, "bigfile", "bigfile", "text/html");

    gridfs_find_filename(gfs, "bigfile", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    /* Now store the local file again, as "bigfile-stream", via the streaming writer interface */
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html");

    fd = fopen("bigfile", "r");

    while((n = fread(buffer, 1, 1024, fd)) != 0) {
      gridfile_write_buffer(gfile, buffer, n);
    }
    gridfile_writer_done( gfile );

    gridfs_find_filename(gfs, "bigfile-stream", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    gridfs_destroy(gfs);
    mongo_disconnect(conn);
    mongo_destroy(conn);
    free(buffer);
}
Example #3
SEXP mongo_gridfile_get_filename(SEXP gfile) {
    gridfile* _gfile = _checkGridfile(gfile);
    SEXP ret;
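    /* PROTECT the freshly allocated character vector so R's garbage collector cannot reclaim it while mkChar allocates the string */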
    PROTECT(ret = allocVector(STRSXP, 1));
    SET_STRING_ELT(ret, 0, mkChar(gridfile_get_filename(_gfile)));
    UNPROTECT(1);
    return ret;
}
Example #4
void test_gridfile(gridfs *gfs, char *data_before, uint64_t length, char *filename, char *content_type) {
    gridfile gfile[1];
    char *data_after = malloc( LARGE );
    if( data_after == NULL ) {
        printf("Failed to allocated memory");
        exit(1);
    }
    FILE * fd;
    mongo_md5_state_t pms[1];
    mongo_md5_byte_t digest[16];
    char hex_digest[33];
    uint64_t i = length;
    int n;

    gridfs_find_filename(gfs, filename, gfile);
    ASSERT(gridfile_exists(gfile));

    fd = fopen("output", "w+");
    gridfile_write_file(gfile, fd);
    fseek(fd, 0, SEEK_SET);
    ASSERT(fread(data_after, length, sizeof(char), fd));
    fclose(fd);
    ASSERT( strncmp(data_before, data_after, length) == 0 );

    gridfile_read( gfile, length, data_after );
    ASSERT( strncmp(data_before, data_after, length) == 0 );

    ASSERT( strcmp( gridfile_get_filename( gfile ), filename ) == 0 );

    ASSERT( gridfile_get_contentlength( gfile ) == length );

    ASSERT( gridfile_get_chunksize( gfile ) == DEFAULT_CHUNK_SIZE );

    ASSERT( strcmp( gridfile_get_contenttype( gfile ), content_type ) == 0) ;

    ASSERT( strncmp( data_before, data_after, length ) == 0 );

    mongo_md5_init(pms);
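    /* mongo_md5_append takes an int length, so buffers larger than INT_MAX are fed to the digest in INT_MAX-sized chunks */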

    n = 0;
    while( i > INT_MAX  ) {
        mongo_md5_append(pms, (const mongo_md5_byte_t *)data_before + (n * INT_MAX), INT_MAX);
        i -= INT_MAX;
        n += 1;
    }
    if( i > 0 )
        mongo_md5_append(pms, (const mongo_md5_byte_t *)data_before + (n * INT_MAX), (int)i);

    mongo_md5_finish(pms, digest);
    digest2hex(digest, hex_digest);
    ASSERT( strcmp( gridfile_get_md5( gfile ), hex_digest ) == 0 );

    gridfile_destroy(gfile);
    gridfs_remove_filename(gfs, filename);
    free( data_after );
}
Example #5
void test_gridfile(gridfs *gfs, char *data_before, size_t length, char *filename, char *content_type) {
    gridfile gfile[1];
    char data_after[UPPER];
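    /* data_after is a fixed UPPER-byte buffer, so this variant assumes length <= UPPER */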
    FILE * fd;
    mongo_md5_state_t pms[1];
    mongo_md5_byte_t digest[16];
    char hex_digest[33];

    gridfs_find_filename(gfs, filename, gfile);
    ASSERT(gridfile_exists(gfile));
    
    fd = fopen("output", "w+");
    gridfile_write_file(gfile, fd);
    fseek(fd, 0, SEEK_SET);
    ASSERT(fread(data_after, length, sizeof(char), fd));
    fclose(fd);
    ASSERT( strncmp(data_before, data_after, length) == 0 );
    
    gridfile_read( gfile, length, data_after);
    ASSERT( strncmp(data_before, data_after, length) == 0 );

    ASSERT( strcmp( gridfile_get_filename( gfile ), filename ) == 0 ); 

    ASSERT( gridfile_get_contentlength( gfile ) == length );

    ASSERT( gridfile_get_chunksize( gfile ) == DEFAULT_CHUNK_SIZE );

    ASSERT( strcmp( gridfile_get_contenttype( gfile ), content_type ) == 0) ;

    mongo_md5_init(pms);
    mongo_md5_append(pms, (const mongo_md5_byte_t *)data_before, (int)length);
    mongo_md5_finish(pms, digest);
    digest2hex(digest, hex_digest);
    ASSERT( strcmp( gridfile_get_md5( gfile ), hex_digest ) == 0 );

    gridfile_destroy(gfile);
    gridfs_remove_filename(gfs, filename);
}
Example #6
static
void test_gridfile( gridfs *gfs, char *data_before, int64_t length, char *filename, char *content_type )
{
    gridfile gfile[1];
    FILE *stream;
#ifdef	DYING
    mongo_md5_state_t pms[1];
    mongo_md5_byte_t digest[16];
#endif
    char hex_digest[33];
    int64_t i = length;
    int n;
    char *data_after = (char*)bson_malloc( LARGE );
    int truncBytes;
    char* lowerName;

    ASSERT(gridfs_find_filename( gfs, filename, gfile ) == MONGO_OK);
    ASSERT( gridfile_exists( gfile ) );

    stream = fopen( "output", "w+" );
    gridfile_write_file( gfile, stream );
    fseek( stream, 0, SEEK_SET );
    ASSERT( fread( data_after, (size_t)length, sizeof( char ), stream ) );
    fclose( stream );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    gridfile_read_buffer( gfile, data_after, length );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    lowerName = (char*) bson_malloc( (int)strlen( filename ) + 1);    
    strcpy( lowerName, filename );
    _strlwr( lowerName );
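    /* this build is expected to store the filename lower-cased, so compare against a lowercased copy */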
    
    ASSERT( strcmp( gridfile_get_filename( gfile ), lowerName ) == 0 );
    bson_free( lowerName );

    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)length );

    ASSERT( gridfile_get_chunksize( gfile ) == DEFAULT_CHUNK_SIZE );

    ASSERT( strcmp( gridfile_get_contenttype( gfile ), content_type ) == 0 ) ;

    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    if( !( gfile->flags & GRIDFILE_COMPRESS ) ) {
#ifdef	DYING
      mongo_md5_init( pms );

      n = 0;
      while( i > INT_MAX  ) {
          mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( n * INT_MAX ), INT_MAX );
          i -= INT_MAX;
          n += 1;
      }
      if( i > 0 )
          mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( n * INT_MAX ), (int)i );

      mongo_md5_finish( pms, digest );
      digest2hex( digest, hex_digest );
#else
      { DIGEST_CTX ctx = rpmDigestInit(PGPHASHALGO_MD5, RPMDIGEST_NONE);
        const char * _digest = NULL;
        int xx;
        n = 0;                          /* start at the first INT_MAX-sized chunk */
        while( i > INT_MAX  ) {
          xx = rpmDigestUpdate(ctx, (char *)data_before + (n*INT_MAX), INT_MAX);
          i -= INT_MAX;
          n += 1;
        }
        if( i > 0 )                     /* digest the remaining tail bytes */
          xx = rpmDigestUpdate(ctx, (char *)data_before + (n*INT_MAX), (size_t)i);
        xx = rpmDigestFinal(ctx, &_digest, NULL, 1);
        strncpy(hex_digest, _digest, 32+1);
        hex_digest[32] = '\0';
        _digest = _free(_digest);
      }
#endif

      ASSERT( strcmp( gridfile_get_md5( gfile ), hex_digest ) == 0 );
    }
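
    /* truncate the stored file and verify that the reported length and the readable bytes shrink to match */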

    truncBytes = (int) (length > DEFAULT_CHUNK_SIZE * 4 ? length - DEFAULT_CHUNK_SIZE * 2 - 13 : 23); 
    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    ASSERT( gridfile_truncate(gfile, (size_t)(length - truncBytes)) == (size_t)(length - truncBytes));
    gridfile_writer_done( gfile );

    gridfile_seek(gfile, 0);
    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)(length - truncBytes) );
    ASSERT( gridfile_read_buffer( gfile, data_after, length ) ==  (size_t)(length - truncBytes));
    ASSERT( memcmp( data_before, data_after, (size_t)(length - truncBytes) ) == 0 );

    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    gridfile_truncate(gfile, 0);
    gridfile_writer_done( gfile );

    ASSERT( gridfile_get_contentlength( gfile ) == 0 );
    ASSERT( gridfile_read_buffer( gfile, data_after, length ) == 0 );

    gridfile_destroy( gfile );
    ASSERT( gridfs_remove_filename( gfs, filename ) == MONGO_OK );
    free( data_after );
    gridfs_test_unlink( "output" );
}
Example #7
static
void test_large( void )
{
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    size_t i, n;
    char *buffer = (char*)bson_malloc( LARGE );
    char *read_buf = (char*)bson_malloc( LARGE );
    gridfs_offset filesize = ( int64_t )1024 * ( int64_t )LARGE;
    mongo_write_concern wc;    
    bson lastError;
    bson lastErrorCmd;
    
    srand( (unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;
    
    mongo_write_concern_init(&wc);
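    /* j = 1: block until each write has been committed to the journal */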
    wc.j = 1;
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    GFS_INIT;

    fd = fopen( "bigfile", "r" );
    if( fd ) {
      fclose( fd );
    } else {
      /* Create a very large file */
      fill_buffer_randomly( buffer, ( int64_t )LARGE );
      fd = fopen( "bigfile", "w" );
      for( i=0; i<1024; i++ ) {
        fwrite( buffer, 1, LARGE, fd );
      }
      fclose( fd );
    }

    /* Now read the file into GridFS */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_store_file( gfs, "bigfile", "bigfile", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS);

    gridfs_find_filename( gfs, "bigfile", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );
    
    fd = fopen( "bigfile", "r" );

    while( ( n = fread( buffer, 1, MEDIUM, fd ) ) != 0 ) {
      ASSERT( gridfile_read_buffer( gfile, read_buf, MEDIUM ) == n );
      ASSERT( memcmp( buffer, read_buf, n ) == 0 );
    }

    fclose( fd );
    gridfile_destroy( gfile );

    /* Now store the local file as "bigfile-stream" via the streaming writer interface */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_remove_filename( gfs, "bigfile-stream" );
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS );

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 0; /* Let's reset write concern j field to zero, we will manually call getLastError with j = 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fd = fopen( "bigfile", "r" );
    i = 0;
    while( ( n = fread( buffer, 1, READ_WRITE_BUF_SIZE, fd ) ) != 0 ) {
        ASSERT( gridfile_write_buffer( gfile, buffer, n ) == n );     
        if(i++ % 10 == 0) {
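          /* every 10 buffers, force a journal sync by running getLastError with j:1 against the test database */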
          bson_init( &lastErrorCmd );
          bson_append_int( &lastErrorCmd, "getLastError", 1);
          bson_append_int( &lastErrorCmd, "j", 1);
          bson_finish( &lastErrorCmd );

          bson_init( &lastError );
          mongo_run_command( conn, "test", &lastErrorCmd, &lastError );

          bson_destroy( &lastError );
          bson_destroy( &lastErrorCmd );
        }
    }

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 1; /* Let's reset write concern j field to 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fclose( fd );
    gridfile_writer_done( gfile );

    gridfs_find_filename( gfs, "bigfile-stream", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );
    gridfs_remove_filename( gfs, "bigfile-stream" );

    gridfs_destroy( gfs );
    mongo_disconnect( conn );
    mongo_destroy( conn );

    bson_free( buffer );
    bson_free( read_buf );
    mongo_write_concern_destroy( &wc );
}
Example #8
EXPORT const char* mongo_gridfile_get_filename(struct gridfile_* gf) {
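    /* cast the opaque exported handle back to the driver's gridfile type and return its filename */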
    return gridfile_get_filename((gridfile*)gf);
}
Example #9
void test_gridfile( gridfs *gfs, char *data_before, int64_t length, char *filename, char *content_type ) {
    gridfile gfile[1];
    FILE *stream;
    mongo_md5_state_t pms[1];
    mongo_md5_byte_t digest[16];
    char hex_digest[33];
    int64_t i = length;
    int n;
    char *data_after = (char*)bson_malloc( LARGE );
    int truncBytes;
    char* lowerName;

    ASSERT(gridfs_find_filename( gfs, filename, gfile ) == MONGO_OK);
    ASSERT( gridfile_exists( gfile ) );

    stream = fopen( "output", "w+" );
    gridfile_write_file( gfile, stream );
    fseek( stream, 0, SEEK_SET );
    ASSERT( fread( data_after, (size_t)length, sizeof( char ), stream ) );
    fclose( stream );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    gridfile_read( gfile, length, data_after );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    lowerName = (char*) bson_malloc( (int)strlen( filename ) + 1);
    strcpy( lowerName, filename);
    _strlwr( lowerName );
    ASSERT( strcmp( gridfile_get_filename( gfile ), lowerName ) == 0 );
    bson_free( lowerName );

    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)length );

    ASSERT( gridfile_get_chunksize( gfile ) == DEFAULT_CHUNK_SIZE );

    ASSERT( strcmp( gridfile_get_contenttype( gfile ), content_type ) == 0 ) ;

    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    if( !( gfile->flags & GRIDFILE_COMPRESS ) ) {
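      /* MD5 is verified only for uncompressed files; with GRIDFILE_COMPRESS the stored digest would not match a hash of the plain data */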
      mongo_md5_init( pms );

      n = 0;
      while( i > INT_MAX  ) {
          mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( n * INT_MAX ), INT_MAX );
          i -= INT_MAX;
          n += 1;
      }
      if( i > 0 )
          mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( n * INT_MAX ), (int)i );

      mongo_md5_finish( pms, digest );
      digest2hex( digest, hex_digest );
      ASSERT( strcmp( gridfile_get_md5( gfile ), hex_digest ) == 0 );
    }

    truncBytes = (int) (length > DEFAULT_CHUNK_SIZE * 4 ? length - DEFAULT_CHUNK_SIZE * 2 - 13 : 23); 
    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    ASSERT( gridfile_truncate(gfile, (size_t)(length - truncBytes)) == (size_t)(length - truncBytes));
    gridfile_writer_done( gfile );

    gridfile_seek(gfile, 0);
    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)(length - truncBytes) );
    ASSERT( gridfile_read( gfile, length, data_after ) ==  (size_t)(length - truncBytes));
    ASSERT( memcmp( data_before, data_after, (size_t)(length - truncBytes) ) == 0 );

    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    gridfile_truncate(gfile, 0);
    gridfile_writer_done( gfile );

    ASSERT( gridfile_get_contentlength( gfile ) == 0 );
    ASSERT( gridfile_read( gfile, length, data_after ) == 0 );

    gridfile_destroy( gfile );
    gridfs_remove_filename( gfs, filename );
    free( data_after );
    _unlink( "output" );
}