Example No. 1
void test_streaming( void ) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    char *medium = (char*)bson_malloc( 2*MEDIUM );
    char *small = (char*)bson_malloc( LOWER );
    char *buf = (char*)bson_malloc( LARGE );
    int n;

    if( medium == NULL || small == NULL || buf == NULL ) {
        printf( "Failed to allocate\n" );
        exit( 1 );
    }

    srand( (unsigned int)time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_client( conn , TEST_SERVER, 27017 ) ) {
        printf( "failed to connect 3\n" );
        exit( 1 );
    }

    fill_buffer_randomly( medium, ( int64_t )2 * MEDIUM );
    fill_buffer_randomly( small, ( int64_t )LOWER );
    fill_buffer_randomly( buf, ( int64_t )LARGE );

    gridfs_init( conn, "test", "fs", gfs );
    gridfile_writer_init( gfile, gfs, "medium", "text/html", GRIDFILE_DEFAULT );

    gridfile_write_buffer( gfile, medium, MEDIUM );
    gridfile_write_buffer( gfile, medium + MEDIUM, MEDIUM );
    gridfile_writer_done( gfile );
    test_gridfile( gfs, medium, 2 * MEDIUM, "medium", "text/html" );
    gridfs_destroy( gfs );

    gridfs_init( conn, "test", "fs", gfs );

    gridfs_store_buffer( gfs, small, LOWER, "small", "text/html", GRIDFILE_DEFAULT );
    test_gridfile( gfs, small, LOWER, "small", "text/html" );
    gridfs_destroy( gfs );

    gridfs_init( conn, "test", "fs", gfs );
    gridfs_remove_filename( gfs, "large" );
    gridfile_writer_init( gfile, gfs, "large", "text/html", GRIDFILE_DEFAULT );
    for( n=0; n < ( LARGE / 1024 ); n++ ) {
        gridfile_write_buffer( gfile, buf + ( n * 1024 ), 1024 );
    }
    gridfile_writer_done( gfile );
    test_gridfile( gfs, buf, LARGE, "large", "text/html" );

    gridfs_destroy( gfs );
    mongo_destroy( conn );
    free( buf );
    free( small );
    free( medium );
}
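
Stripped of the test scaffolding, the write path exercised above reduces to the short sketch below. This is not part of the original examples: it assumes the newer legacy-driver API shown in Example No. 1 (five-argument gridfile_writer_init with GRIDFILE_DEFAULT), the driver headers mongo.h and gridfs.h, and a mongod reachable on 127.0.0.1:27017; error handling is reduced to a single return code.

/* Minimal write-path sketch (assumed setup: legacy C driver headers and a local mongod). */
#include "mongo.h"
#include "gridfs.h"
#include <string.h>

int store_html( const char *remote_name, const char *data ) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];

    if ( mongo_client( conn, "127.0.0.1", 27017 ) != MONGO_OK )
        return -1;

    gridfs_init( conn, "test", "fs", gfs );               /* database "test", collection prefix "fs" */
    gridfile_writer_init( gfile, gfs, remote_name, "text/html", GRIDFILE_DEFAULT );
    gridfile_write_buffer( gfile, data, strlen( data ) ); /* may be called repeatedly to stream chunks */
    gridfile_writer_done( gfile );                        /* flushes the last chunk and writes the files document */

    gridfs_destroy( gfs );
    mongo_destroy( conn );
    return 0;
}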
Example No. 2
void test_large() {
    mongo_connection conn[1];
    mongo_connection_options opts;
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    int i, n;
    char buffer[LARGE];
    int64_t filesize = (int64_t)1024 * (int64_t)LARGE;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    strncpy(opts.host, "127.0.0.1", 255);
    opts.host[254] = '\0';
    opts.port = 27017;

    if (mongo_connect( conn , &opts )) {
        printf("failed to connect\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    /* Create a very large file */
    fill_buffer_randomly(buffer, (int64_t)LARGE);
    fd = fopen("bigfile", "w");
    for(i=0; i<1024; i++) {
        fwrite(buffer, 1, LARGE, fd);
    }
    fclose(fd);

    /* Now read the local file and store it in GridFS */
    gridfs_store_file(gfs, "bigfile", "bigfile", "text/html");

    gridfs_find_filename(gfs, "bigfile", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    /* Stream the local file into GridFS as "bigfile-stream" */
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html");

    fd = fopen("bigfile", "r");

    while((n = fread(buffer, 1, 1024, fd)) != 0) {
        gridfile_write_buffer(gfile, buffer, n);
    }
    gridfile_writer_done( gfile );

    gridfs_find_filename(gfs, "bigfile-stream", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    gridfs_destroy(gfs);
    mongo_destroy(conn);
}
Example No. 3
void test_large() {
    mongo_connection conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    int i, n;
    char *buffer = malloc( LARGE );
    if( buffer == NULL ) {
        printf("Failed to allocate memory.");
        exit(1);
    }
    uint64_t filesize = (uint64_t)1024 * (uint64_t)LARGE;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn, TEST_SERVER, 27017 )){
        printf("failed to connect 1\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    /* Create a very large file */
    fill_buffer_randomly(buffer, (uint64_t)LARGE);
    fd = fopen("bigfile", "w");
    for(i=0; i<1024; i++) {
      fwrite(buffer, 1, LARGE, fd);
    }
    fclose(fd);

    /* Now read the local file and store it in GridFS */
    gridfs_store_file(gfs, "bigfile", "bigfile", "text/html");

    gridfs_find_filename(gfs, "bigfile", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    /* Stream the local file into GridFS as "bigfile-stream" */
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html");

    fd = fopen("bigfile", "r");

    while((n = fread(buffer, 1, 1024, fd)) != 0) {
      gridfile_write_buffer(gfile, buffer, n);
    }
    gridfile_writer_done( gfile );

    gridfs_find_filename(gfs, "bigfile-stream", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    gridfs_destroy(gfs);
    mongo_disconnect(conn);
    mongo_destroy(conn);
}
Example No. 4
void test_streaming() {
    mongo_connection conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    char *buf = malloc( LARGE );
    char *small = malloc( LOWER );
    if( buf == NULL || small == NULL ) {
        printf("Failed to allocate");
        exit(1);
    }
    int n;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn , TEST_SERVER, 27017 )){
        printf("failed to connect 3\n");
        exit(1);
    }

    fill_buffer_randomly(small, (uint64_t)LOWER);
    fill_buffer_randomly(buf, (uint64_t)LARGE);

    gridfs_init(conn, "test", "fs", gfs);

    gridfs_store_buffer(gfs, small, LOWER, "small", "text/html");
    test_gridfile(gfs, small, LOWER, "small", "text/html");
    gridfs_destroy(gfs);

    gridfs_init(conn, "test", "fs", gfs);
    gridfile_writer_init(gfile, gfs, "large", "text/html");
    for(n=0; n < (LARGE / 1024); n++) {
      gridfile_write_buffer(gfile, buf + (n * 1024), 1024);
    }
    gridfile_writer_done( gfile );
    test_gridfile(gfs, buf, LARGE, "large", "text/html");

    gridfs_destroy(gfs);
    mongo_destroy(conn);
    free(buf);
    free(small);
}
Example No. 5
void test_streaming() {
    mongo_connection conn[1];
    mongo_connection_options opts;
    gridfs gfs[1];
    gridfile gfile[1];
    char buf[LARGE];
    char small[LOWER];
    int n;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    strncpy(opts.host, "127.0.0.1", 255);
    opts.host[254] = '\0';
    opts.port = 27017;

    if (mongo_connect( conn , &opts )) {
        printf("failed to connect\n");
        exit(1);
    }

    fill_buffer_randomly(small, (int64_t)LOWER);
    fill_buffer_randomly(buf, (int64_t)LARGE);

    gridfs_init(conn, "test", "fs", gfs);

    gridfs_store_buffer(gfs, small, LOWER, "small", "text/html");
    test_gridfile(gfs, small, LOWER, "small", "text/html");
    gridfs_destroy(gfs);

    gridfs_init(conn, "test", "fs", gfs);
    gridfile_writer_init(gfile, gfs, "large", "text/html");
    for(n=0; n < (LARGE / 1024); n++) {
        gridfile_write_buffer(gfile, buf + (n * 1024), 1024);
    }
    gridfile_writer_done( gfile );
    test_gridfile(gfs, buf, LARGE, "large", "text/html");

    gridfs_destroy(gfs);
    mongo_destroy(conn);
}
Example No. 6
SEXP mongo_gridfile_writer_create(SEXP gfs, SEXP remotename, SEXP contenttype) {
    gridfs* _gfs = _checkGridfs(gfs);
    const char* _remotename = CHAR(STRING_ELT(remotename, 0));
    const char* _contenttype = CHAR(STRING_ELT(contenttype, 0));
    gridfile* gfile = Calloc(1, gridfile);

    SEXP ret, ptr, cls;
    PROTECT(ret = allocVector(INTSXP, 1));
    INTEGER(ret)[0] = 0;
    /* prevent GC on gridfs object while gridfile alive */
    setAttrib(ret, sym_mongo_gridfs, gfs);
    ptr = R_MakeExternalPtr(gfile, sym_mongo_gridfile, R_NilValue);
    PROTECT(ptr);
    R_RegisterCFinalizerEx(ptr, mongoGridfileFinalizer, TRUE);
    setAttrib(ret, sym_mongo_gridfile, ptr);
    PROTECT(cls = allocVector(STRSXP, 1));
    SET_STRING_ELT(cls, 0, mkChar("mongo.gridfile.writer"));
    classgets(ret, cls);
    gridfile_writer_init(gfile, _gfs, _remotename, _contenttype);
    UNPROTECT(3);
    return ret;
}
Example No. 7
void clsRasterData::outputToMongoDB(map<string,float> header, int nValid, float** position, float* value,string remoteFilename, gridfs* gfs)
{
	float noData = -9999.0f;
	// prepare binary data
	int rows = int(header["NROWS"]);
	int cols = int(header["NCOLS"]);
	float *data = new float[rows*cols];
	
	int index = 0;
	int dataIndex = 0;
	for (int i = 0; i < rows; ++i)
	{
		for (int j = 0; j < cols; ++j)
		{
			dataIndex = i*cols + j;
			if(index < nValid)
			{
				if(position[index][0] == i && position[index][1] == j) 
				{
					data[dataIndex] = value[index];	
					index++;
				}
				else 
					data[dataIndex] = noData;
			}
			else 
				data[dataIndex] = noData;				
		}
	}
	
	bson *p = (bson*)malloc(sizeof(bson));
	bson_init(p);
	bson_append_string(p, "ID", remoteFilename.c_str());
	bson_append_double(p, "CELLSIZE", header["CELLSIZE"]);
	bson_append_double(p, "NODATA_VALUE", noData);
	bson_append_double(p, "NCOLS", cols);
	bson_append_double(p, "NROWS", rows);
	bson_append_double(p, "XLLCENTER", header["XLLCENTER"]);
	bson_append_double(p, "YLLCENTER", header["YLLCENTER"]);
	bson_finish(p);
		
	gridfile gfile[1];
	gridfile_writer_init(gfile, gfs, remoteFilename.c_str(), "float");

	size_t iID = remoteFilename.find_first_of('_'); 
	int subbasinID = atoi(remoteFilename.substr(0, iID).c_str());
	gfile->id.ints[0] = subbasinID;
	
	for (int k = 0; k < rows; k++)
	{
		gridfile_write_buffer(gfile, (const char*)(data+cols*k), sizeof(float)*cols);
	}
	gridfile_set_metadata(gfile, p);
	int response = gridfile_writer_done(gfile);
	//cout << remoteFilename << "\t" << gfile->id.ints[0] <<  gfile->id.ints[1] << gfile->id.ints[2] << endl;

	gridfile_destroy(gfile);	
	bson_destroy(p);
	free(p);
	
	delete[] data;
}
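
Reading such a raster back out of GridFS is roughly the inverse of the write loop above. The sketch below is not part of the original source: it uses only GridFS calls that already appear in the examples on this page, assumes the same legacy driver headers, and follows the older gridfile_read() signature (newer driver revisions use gridfile_read_buffer()); the MONGO_OK success check follows the later examples and may differ in very old driver revisions. The caller owns the returned buffer and must release it with delete[].

// Hedged read-back sketch for a raster stored by outputToMongoDB().
float* readRasterFromMongoDB(gridfs* gfs, const char* remoteFilename, int& nCells)
{
	gridfile gfile[1];
	if (gridfs_find_filename(gfs, remoteFilename, gfile) != MONGO_OK)
		return NULL;

	gridfs_offset length = gridfile_get_contentlength(gfile);
	nCells = int(length / sizeof(float));

	float* data = new float[nCells];
	gridfile_read(gfile, length, (char*)data); // newer driver revisions: gridfile_read_buffer(gfile, (char*)data, length)
	gridfile_destroy(gfile);

	return data;
}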
Example No. 8
static
void test_gridfile( gridfs *gfs, char *data_before, int64_t length, char *filename, char *content_type )
{
    gridfile gfile[1];
    FILE *stream;
#ifdef	DYING
    mongo_md5_state_t pms[1];
    mongo_md5_byte_t digest[16];
#endif
    char hex_digest[33];
    int64_t i = length;
    int n;
    char *data_after = (char*)bson_malloc( LARGE );
    int truncBytes;
    char* lowerName;

    ASSERT(gridfs_find_filename( gfs, filename, gfile ) == MONGO_OK);
    ASSERT( gridfile_exists( gfile ) );

    stream = fopen( "output", "w+" );
    gridfile_write_file( gfile, stream );
    fseek( stream, 0, SEEK_SET );
    ASSERT( fread( data_after, (size_t)length, sizeof( char ), stream ) );
    fclose( stream );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    gridfile_read_buffer( gfile, data_after, length );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    lowerName = (char*) bson_malloc( (int)strlen( filename ) + 1);    
    strcpy( lowerName, filename );
    _strlwr( lowerName );
    
    ASSERT( strcmp( gridfile_get_filename( gfile ), lowerName ) == 0 );
    bson_free( lowerName );

    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)length );

    ASSERT( gridfile_get_chunksize( gfile ) == DEFAULT_CHUNK_SIZE );

    ASSERT( strcmp( gridfile_get_contenttype( gfile ), content_type ) == 0 ) ;

    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    if( !( gfile->flags & GRIDFILE_COMPRESS ) ) {
#ifdef	DYING
      mongo_md5_init( pms );

      n = 0;
      while( i > INT_MAX  ) {
          mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( n * INT_MAX ), INT_MAX );
          i -= INT_MAX;
          n += 1;
      }
      if( i > 0 )
          mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( n * INT_MAX ), (int)i );

      mongo_md5_finish( pms, digest );
      digest2hex( digest, hex_digest );
#else
      { DIGEST_CTX ctx = rpmDigestInit(PGPHASHALGO_MD5, RPMDIGEST_NONE);
        const char * _digest = NULL;
        int xx;
        n = 0;
        while( i > INT_MAX  ) {
          xx = rpmDigestUpdate(ctx, (char *)data_before + (n*INT_MAX), INT_MAX);
          i -= INT_MAX;
          n += 1;
        }
        if( i > 0 )
          xx = rpmDigestUpdate(ctx, (char *)data_before + (n*INT_MAX), (int)i);
        xx = rpmDigestFinal(ctx, &_digest, NULL, 1);
        strncpy(hex_digest, _digest, 32+1);
        hex_digest[32] = '\0';
        _digest = _free(_digest);
      }
#endif

      ASSERT( strcmp( gridfile_get_md5( gfile ), hex_digest ) == 0 );
    }

    truncBytes = (int) (length > DEFAULT_CHUNK_SIZE * 4 ? length - DEFAULT_CHUNK_SIZE * 2 - 13 : 23); 
    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    ASSERT( gridfile_truncate(gfile, (size_t)(length - truncBytes)) == (size_t)(length - truncBytes));
    gridfile_writer_done( gfile );

    gridfile_seek(gfile, 0);
    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)(length - truncBytes) );
    ASSERT( gridfile_read_buffer( gfile, data_after, length ) ==  (size_t)(length - truncBytes));
    ASSERT( memcmp( data_before, data_after, (size_t)(length - truncBytes) ) == 0 );

    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    gridfile_truncate(gfile, 0);
    gridfile_writer_done( gfile );

    ASSERT( gridfile_get_contentlength( gfile ) == 0 );
    ASSERT( gridfile_read_buffer( gfile, data_after, length ) == 0 );

    gridfile_destroy( gfile );
    ASSERT( gridfs_remove_filename( gfs, filename ) == MONGO_OK );
    free( data_after );
    gridfs_test_unlink( "output" );
}
Example No. 9
static
void test_large( void )
{
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    size_t i, n;
    char *buffer = (char*)bson_malloc( LARGE );
    char *read_buf = (char*)bson_malloc( LARGE );
    gridfs_offset filesize = ( int64_t )1024 * ( int64_t )LARGE;
    mongo_write_concern wc;    
    bson lastError;
    bson lastErrorCmd;
    
    srand( (unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;
    
    mongo_write_concern_init(&wc);
    wc.j = 1;
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    GFS_INIT;

    fd = fopen( "bigfile", "r" );
    if( fd ) {
      fclose( fd );
    } else {
      /* Create a very large file */
      fill_buffer_randomly( buffer, ( int64_t )LARGE );
      fd = fopen( "bigfile", "w" );
      for( i=0; i<1024; i++ ) {
        fwrite( buffer, 1, LARGE, fd );
      }
      fclose( fd );
    }

    /* Now read the local file and store it in GridFS */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_store_file( gfs, "bigfile", "bigfile", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS);

    gridfs_find_filename( gfs, "bigfile", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );
    
    fd = fopen( "bigfile", "r" );

    while( ( n = fread( buffer, 1, MEDIUM, fd ) ) != 0 ) {
      ASSERT( gridfile_read_buffer( gfile, read_buf, MEDIUM ) == n );
      ASSERT( memcmp( buffer, read_buf, n ) == 0 );
    }

    fclose( fd );
    gridfile_destroy( gfile );

    /* Now stream the local file into GridFS as "bigfile-stream" */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_remove_filename( gfs, "bigfile-stream" );
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS );

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 0; /* Let's reset write concern j field to zero, we will manually call getLastError with j = 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fd = fopen( "bigfile", "r" );
    i = 0;
    while( ( n = fread( buffer, 1, READ_WRITE_BUF_SIZE, fd ) ) != 0 ) {
        ASSERT( gridfile_write_buffer( gfile, buffer, n ) == n );     
        if(i++ % 10 == 0) {
          bson_init( &lastErrorCmd );
          bson_append_int( &lastErrorCmd, "getLastError", 1);
          bson_append_int( &lastErrorCmd, "j", 1);
          bson_finish( &lastErrorCmd );

          bson_init( &lastError );
          mongo_run_command( conn, "test", &lastErrorCmd, &lastError );

          bson_destroy( &lastError );
          bson_destroy( &lastErrorCmd );
        }
    }

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 1; /* Let's reset write concern j field to 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fclose( fd );
    gridfile_writer_done( gfile );

    gridfs_find_filename( gfs, "bigfile-stream", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );
    gridfs_remove_filename( gfs, "bigfile-stream" );

    gridfs_destroy( gfs );
    mongo_disconnect( conn );
    mongo_destroy( conn );

    bson_free( buffer );
    bson_free( read_buf );
    mongo_write_concern_destroy( &wc );
}
Example No. 10
static
void test_random_write2( void ) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    bson meta;
    char *buf = (char*)bson_malloc( LARGE );
    char *zeroedbuf = (char*)bson_malloc( LARGE );
    int n;

    if( buf == NULL || zeroedbuf == NULL ) {
        printf( "Failed to allocate\n" );
        exit( 1 );
    }

    srand( 123 ); // Init with a predictable value

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;

    fill_buffer_randomly( buf, ( int64_t )LARGE );
    memset( zeroedbuf, 0, LARGE ); 

    bson_init_empty( &meta );

    GFS_INIT;

    /* In this portion of the test we write zeroes using the new gridfile_set_size API;
       gridfile_expand is tested implicitly by using gridfile_set_size to make the file larger */
    gridfile_init( gfs, &meta, gfile );
    gridfile_writer_init( gfile, gfs, "random_access", "text/html", 0 );
    gridfile_set_size( gfile, LARGE ); // New API, this zero fills the file
    gridfile_writer_done( gfile );    
    test_gridfile( gfs, zeroedbuf, LARGE, "random_access", "text/html" ); // Test zero filled file

    /* In this portion of the test we write zeroes using gridfile_set_size, then truncate the file */
    gridfile_init( gfs, &meta, gfile );
    gridfile_writer_init( gfile, gfs, "random_access", "text/html", 0 );
    gridfile_set_size( gfile, LARGE ); // New API, this zero fills the file with LARGE bytes
    gridfile_truncate( gfile, LARGE / 2 ); // Let's truncate the file now
    gridfile_writer_done( gfile );    
    test_gridfile( gfs, zeroedbuf, LARGE / 2, "random_access", "text/html" ); // Test zero filled file truncated by half

    /* Let's re-create the file, now let's randomly write real data */
    gridfile_init( gfs, &meta, gfile );
    gridfile_writer_init( gfile, gfs, "random_access", "text/html", 0 );   
    gridfile_set_size( gfile, LARGE ); // We need to reserve LARGE bytes on file before writing backwards

    /* Here we write the file backwards with our random data, using a 3072-byte block size so that writes
       cross chunk boundaries (256K is not a multiple of 3072). Let's stress the buffering logic a little bit */
    for( n = LARGE / 3072 - 1; n >= 0; n-- ) {
        gridfile_seek( gfile, 3072 * n );
        ASSERT( gridfile_write_buffer( gfile, buf + ( n * 3072 ), 3072 ) == 3072 );
    }
    gridfile_writer_done( gfile );
    test_gridfile( gfs, buf, LARGE, "random_access", "text/html" );

    gridfs_destroy( gfs );
    mongo_destroy( conn );

    free( buf );
    free( zeroedbuf );
}
Example No. 11
static
void test_random_write(void) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile* gfile;
    char *data_before = (char*)bson_malloc( UPPER );
    char *random_data = (char*)bson_malloc( UPPER );
    char *buf = (char*) bson_malloc( UPPER );
    int64_t i;
    FILE *fd;

    srand((unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;
    GFS_INIT;

    fill_buffer_randomly( data_before, UPPER );
    fill_buffer_randomly( random_data, UPPER );
    for ( i = LOWER; i <= UPPER; i += DELTA ) {
        int64_t j = i / 2 - 3;
        gridfs_offset bytes_to_write_first;
        int n;

        /* Input from buffer */
        gridfs_store_buffer( gfs, data_before, i, "input-buffer", "text/html", GRIDFILE_DEFAULT );
        if ( i > DEFAULT_CHUNK_SIZE * 4 ) {
          n = DEFAULT_CHUNK_SIZE * 3 + 6;
          memcpy(&data_before[j], random_data, n); // Let's overwrite the buffer with bytes crossing multiple chunks
          bytes_to_write_first = 10;
        } else {
          n = 6;
          memcpy(random_data, "123456", n);
          strncpy(&data_before[j], random_data, n); // Let's overwrite the buffer with a few bytes
          bytes_to_write_first = 0;
        }
        gfile = gridfile_create();
        ASSERT(gridfs_find_filename(gfs, "input-buffer", gfile) == 0);
        gridfile_writer_init(gfile, gfs, "input-buffer", "text/html", GRIDFILE_DEFAULT );
        gridfile_seek(gfile, j); // Seek into the same buffer position within the GridFS file
        if ( bytes_to_write_first ) {
          ASSERT( gridfile_write_buffer(gfile, random_data, bytes_to_write_first) == bytes_to_write_first ); // Let's write 10 bytes first, and later the rest
        }
        ASSERT( gridfile_write_buffer(gfile, &random_data[bytes_to_write_first], n - bytes_to_write_first) == n - bytes_to_write_first ); // Try to write to the existing GridFS file on the position given by j
        gridfile_seek(gfile, j);
        gridfile_read_buffer( gfile, buf, n );
        ASSERT(memcmp( buf, &data_before[j], n) == 0);

        gridfile_writer_done(gfile);
        ASSERT(gfile->pos == (gridfs_offset)(j + n));
        gridfile_dealloc(gfile);
        test_gridfile( gfs, data_before, j + n > i ? j + n : i, "input-buffer", "text/html" );

        /* Input from file */
        fd = fopen( "input-file", "w" );
        fwrite( data_before, sizeof( char ), (size_t) (j + n > i ? j + n : i), fd );
        fclose( fd );
        gridfs_store_file( gfs, "input-file", "input-file", "text/html", GRIDFILE_DEFAULT );
        test_gridfile( gfs, data_before, j + n > i ? j + n : i, "input-file", "text/html" );
    }

    gridfs_destroy( gfs );
    mongo_disconnect( conn );
    mongo_destroy( conn );
    free( data_before );
    free( random_data );
    free( buf );

    /* Clean up files. */
    gridfs_test_unlink( "input-file" );
    gridfs_test_unlink( "output" );   
}
Example No. 12
EXPORT void mongo_gridfile_writer_create(struct gridfs_* gfs, char* remoteName, char* contentType, struct gridfile_** gf) {
    gridfile* gf_ = (gridfile*)malloc(sizeof(gridfile));
    gridfile_writer_init(gf_, (gridfs*)gfs, remoteName, contentType);
    *gf = (struct gridfile_*)gf_;
}
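
Because the wrapper above heap-allocates the gridfile, a matching teardown wrapper is needed once the caller has finished writing. The sketch below is hypothetical: the name mongo_gridfile_writer_finish is not from the original source, EXPORT and the opaque struct gridfile_ type are assumed to come from the same project header, and gridfile_writer_done() is assumed to return an int status in this driver revision, as in Example No. 7.

/* Hypothetical companion wrapper: finalize and free the writer created above. */
EXPORT int mongo_gridfile_writer_finish(struct gridfile_** gf) {
    gridfile* gf_ = (gridfile*)*gf;
    int result = gridfile_writer_done(gf_); /* flush pending data and write the files document */
    gridfile_destroy(gf_);                  /* release driver-side metadata */
    free(gf_);                              /* matches the malloc() in mongo_gridfile_writer_create() */
    *gf = NULL;
    return result;
}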
Example No. 13
void test_gridfile( gridfs *gfs, char *data_before, int64_t length, char *filename, char *content_type ) {
    gridfile gfile[1];
    FILE *stream;
    mongo_md5_state_t pms[1];
    mongo_md5_byte_t digest[16];
    char hex_digest[33];
    int64_t i = length;
    int n;
    char *data_after = (char*)bson_malloc( LARGE );
    int truncBytes;
    char* lowerName;

    ASSERT(gridfs_find_filename( gfs, filename, gfile ) == MONGO_OK);
    ASSERT( gridfile_exists( gfile ) );

    stream = fopen( "output", "w+" );
    gridfile_write_file( gfile, stream );
    fseek( stream, 0, SEEK_SET );
    ASSERT( fread( data_after, (size_t)length, sizeof( char ), stream ) );
    fclose( stream );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    gridfile_read( gfile, length, data_after );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    lowerName = (char*) bson_malloc( (int)strlen( filename ) + 1);
    strcpy( lowerName, filename);
    _strlwr( lowerName );
    ASSERT( strcmp( gridfile_get_filename( gfile ), lowerName ) == 0 );
    bson_free( lowerName );

    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)length );

    ASSERT( gridfile_get_chunksize( gfile ) == DEFAULT_CHUNK_SIZE );

    ASSERT( strcmp( gridfile_get_contenttype( gfile ), content_type ) == 0 ) ;

    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    if( !( gfile->flags & GRIDFILE_COMPRESS ) ) {
      mongo_md5_init( pms );

      n = 0;
      while( i > INT_MAX  ) {
          mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( n * INT_MAX ), INT_MAX );
          i -= INT_MAX;
          n += 1;
      }
      if( i > 0 )
          mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( n * INT_MAX ), (int)i );

      mongo_md5_finish( pms, digest );
      digest2hex( digest, hex_digest );
      ASSERT( strcmp( gridfile_get_md5( gfile ), hex_digest ) == 0 );
    }

    truncBytes = (int) (length > DEFAULT_CHUNK_SIZE * 4 ? length - DEFAULT_CHUNK_SIZE * 2 - 13 : 23); 
    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    ASSERT( gridfile_truncate(gfile, (size_t)(length - truncBytes)) == (size_t)(length - truncBytes));
    gridfile_writer_done( gfile );

    gridfile_seek(gfile, 0);
    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)(length - truncBytes) );
    ASSERT( gridfile_read( gfile, length, data_after ) ==  (size_t)(length - truncBytes));
    ASSERT( memcmp( data_before, data_after, (size_t)(length - truncBytes) ) == 0 );

    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    gridfile_truncate(gfile, 0);
    gridfile_writer_done( gfile );

    ASSERT( gridfile_get_contentlength( gfile ) == 0 );
    ASSERT( gridfile_read( gfile, length, data_after ) == 0 );

    gridfile_destroy( gfile );
    gridfs_remove_filename( gfs, filename );
    free( data_after );
    _unlink( "output" );
}