Example #1
void test_streaming( void ) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    char *medium = (char*)bson_malloc( 2*MEDIUM );
    char *small = (char*)bson_malloc( LOWER );
    char *buf = (char*)bson_malloc( LARGE );
    int n;

    if( buf == NULL || small == NULL || medium == NULL ) {
        printf( "Failed to allocate" );
        exit( 1 );
    }

    srand( (unsigned int)time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_client( conn , TEST_SERVER, 27017 ) ) {
        printf( "failed to connect 3\n" );
        exit( 1 );
    }

    fill_buffer_randomly( medium, ( int64_t )2 * MEDIUM );
    fill_buffer_randomly( small, ( int64_t )LOWER );
    fill_buffer_randomly( buf, ( int64_t )LARGE );

    gridfs_init( conn, "test", "fs", gfs );
    gridfile_writer_init( gfile, gfs, "medium", "text/html", GRIDFILE_DEFAULT );

    gridfile_write_buffer( gfile, medium, MEDIUM );
    gridfile_write_buffer( gfile, medium + MEDIUM, MEDIUM );
    gridfile_writer_done( gfile );
    test_gridfile( gfs, medium, 2 * MEDIUM, "medium", "text/html" );
    gridfs_destroy( gfs );

    gridfs_init( conn, "test", "fs", gfs );

    gridfs_store_buffer( gfs, small, LOWER, "small", "text/html", GRIDFILE_DEFAULT );
    test_gridfile( gfs, small, LOWER, "small", "text/html" );
    gridfs_destroy( gfs );

    gridfs_init( conn, "test", "fs", gfs );
    gridfs_remove_filename( gfs, "large" );
    gridfile_writer_init( gfile, gfs, "large", "text/html", GRIDFILE_DEFAULT );
    for( n=0; n < ( LARGE / 1024 ); n++ ) {
        gridfile_write_buffer( gfile, buf + ( n * 1024 ), 1024 );
    }
    gridfile_writer_done( gfile );
    test_gridfile( gfs, buf, LARGE, "large", "text/html" );

    gridfs_destroy( gfs );
    mongo_destroy( conn );
    free( buf );
    free( small );
    free( medium );
}
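The sequence the examples on this page share is gridfile_writer_init, one or more gridfile_write_buffer calls, and a closing gridfile_writer_done. Below is a minimal sketch of that sequence distilled from the examples, assuming the legacy MongoDB C driver headers (mongo.h, gridfs.h) and a mongod reachable on localhost; the name upload_file and the 4 KB read size are illustrative only, not part of the driver.

#include <stdio.h>
#include "mongo.h"
#include "gridfs.h"

/* Sketch: stream a local file into GridFS in fixed-size pieces. */
static int upload_file( const char *local, const char *remote ) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    char chunk[4096];
    size_t n;
    FILE *fd;

    if ( mongo_client( conn, "127.0.0.1", 27017 ) )   /* non-zero means the connection failed */
        return -1;
    gridfs_init( conn, "test", "fs", gfs );

    fd = fopen( local, "rb" );
    if ( fd == NULL ) {
        gridfs_destroy( gfs );
        mongo_destroy( conn );
        return -1;
    }

    gridfile_writer_init( gfile, gfs, remote, "application/octet-stream", GRIDFILE_DEFAULT );
    while ( ( n = fread( chunk, 1, sizeof( chunk ), fd ) ) != 0 )
        gridfile_write_buffer( gfile, chunk, n );      /* buffered until a full GridFS chunk accumulates */
    gridfile_writer_done( gfile );                     /* flushes the remainder and writes the files entry */

    fclose( fd );
    gridfs_destroy( gfs );
    mongo_destroy( conn );
    return 0;
}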
Example #2
EXPORT void mongo_gridfile_writer_write(struct gridfile_* gf, mxArray* data) {
    uint64_t size;
    void* p;
    int cplx = mxIsComplex(data);
    if (cplx && mxGetClassID(data) != mxDOUBLE_CLASS)
        mexErrMsgTxt("GridfileWriter:write - only complex values of type double are supported");
    p = calcSize(data, &size);   /* helper (defined elsewhere): returns the data pointer and fills size in bytes */
    gridfile_write_buffer((gridfile*)gf, (char*)p, size);
    if (cplx)                    /* the imaginary part is appended directly after the real part */
        gridfile_write_buffer((gridfile*)gf, (char*)mxGetPi(data), size);
}
Example #3
void test_large() {
    mongo_connection conn[1];
    mongo_connection_options opts;
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    int i, n;
    char buffer[LARGE];
    int64_t filesize = (int64_t)1024 * (int64_t)LARGE;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    strncpy(opts.host, "127.0.0.1", 255);
    opts.host[254] = '\0';
    opts.port = 27017;

    if (mongo_connect( conn , &opts )) {
        printf("failed to connect\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    /* Create a very large file */
    fill_buffer_randomly(buffer, (int64_t)LARGE);
    fd = fopen("bigfile", "w");
    for(i=0; i<1024; i++) {
        fwrite(buffer, 1, LARGE, fd);
    }
    fclose(fd);

    /* Now read the file into GridFS */
    gridfs_store_file(gfs, "bigfile", "bigfile", "text/html");

    gridfs_find_filename(gfs, "bigfile", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    /* Now stream the local file into GridFS using the writer interface */
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html");

    fd = fopen("bigfile", "r");

    while((n = fread(buffer, 1, 1024, fd)) != 0) {
        gridfile_write_buffer(gfile, buffer, n);
    }
    gridfile_writer_done( gfile );

    gridfs_find_filename(gfs, "bigfile-stream", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    gridfs_destroy(gfs);
    mongo_destroy(conn);
}
Example #4
void test_large() {
    mongo_connection conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    int i, n;
    char *buffer = malloc( LARGE );
    if( buffer == NULL ) {
        printf("Failed to allocate memory.");
        exit(1);
    }
    uint64_t filesize = (uint64_t)1024 * (uint64_t)LARGE;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn, TEST_SERVER, 27017 )){
        printf("failed to connect 1\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    /* Create a very large file */
    fill_buffer_randomly(buffer, (uint64_t)LARGE);
    fd = fopen("bigfile", "w");
    for(i=0; i<1024; i++) {
      fwrite(buffer, 1, LARGE, fd);
    }
    fclose(fd);

    /* Now read the file into GridFS */
    gridfs_store_file(gfs, "bigfile", "bigfile", "text/html");

    gridfs_find_filename(gfs, "bigfile", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    /* Now stream the local file into GridFS using the writer interface */
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html");

    fd = fopen("bigfile", "r");

    while((n = fread(buffer, 1, 1024, fd)) != 0) {
      gridfile_write_buffer(gfile, buffer, n);
    }
    gridfile_writer_done( gfile );

    gridfs_find_filename(gfs, "bigfile-stream", gfile);

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );

    gridfs_destroy(gfs);
    mongo_disconnect(conn);
    mongo_destroy(conn);
    free(buffer);
}
Example #5
void test_streaming() {
    mongo_connection conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    char *buf = malloc( LARGE );
    char *small = malloc( LOWER );
    if( buf == NULL || small == NULL ) {
        printf("Failed to allocate");
        exit(1);
    }
    int n;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn , TEST_SERVER, 27017 )){
        printf("failed to connect 3\n");
        exit(1);
    }

    fill_buffer_randomly(small, (uint64_t)LOWER);
    fill_buffer_randomly(buf, (uint64_t)LARGE);

    gridfs_init(conn, "test", "fs", gfs);

    gridfs_store_buffer(gfs, small, LOWER, "small", "text/html");
    test_gridfile(gfs, small, LOWER, "small", "text/html");
    gridfs_destroy(gfs);

    gridfs_init(conn, "test", "fs", gfs);
    gridfile_writer_init(gfile, gfs, "large", "text/html");
    for(n=0; n < (LARGE / 1024); n++) {
      gridfile_write_buffer(gfile, buf + (n * 1024), 1024);
    }
    gridfile_writer_done( gfile );
    test_gridfile(gfs, buf, LARGE, "large", "text/html");

    gridfs_destroy(gfs);
    mongo_destroy(conn);
    free(buf);
    free(small);
}
Example #6
void test_streaming() {
    mongo_connection conn[1];
    mongo_connection_options opts;
    gridfs gfs[1];
    gridfile gfile[1];
    char buf[LARGE];
    char small[LOWER];
    int n;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    strncpy(opts.host, "127.0.0.1", 255);
    opts.host[254] = '\0';
    opts.port = 27017;

    if (mongo_connect( conn , &opts )) {
        printf("failed to connect\n");
        exit(1);
    }

    fill_buffer_randomly(small, (int64_t)LOWER);
    fill_buffer_randomly(buf, (int64_t)LARGE);

    gridfs_init(conn, "test", "fs", gfs);

    gridfs_store_buffer(gfs, small, LOWER, "small", "text/html");
    test_gridfile(gfs, small, LOWER, "small", "text/html");
    gridfs_destroy(gfs);

    gridfs_init(conn, "test", "fs", gfs);
    gridfile_writer_init(gfile, gfs, "large", "text/html");
    for(n=0; n < (LARGE / 1024); n++) {
        gridfile_write_buffer(gfile, buf + (n * 1024), 1024);
    }
    gridfile_writer_done( gfile );
    test_gridfile(gfs, buf, LARGE, "large", "text/html");

    gridfs_destroy(gfs);
    mongo_destroy(conn);
}
Example #7
void clsRasterData::outputToMongoDB(map<string,float> header, int nValid, float** position, float* value,string remoteFilename, gridfs* gfs)
{
	float noData = -9999.0f;
	// prepare binary data
	int rows = int(header["NROWS"]);
	int cols = int(header["NCOLS"]);
	float *data = new float[rows*cols];
	
	int index = 0;
	int dataIndex = 0;
	for (int i = 0; i < rows; ++i)
	{
		for (int j = 0; j < cols; ++j)
		{
			dataIndex = i*cols + j;
			if(index < nValid)
			{
				if(position[index][0] == i && position[index][1] == j) 
				{
					data[dataIndex] = value[index];	
					index++;
				}
				else 
					data[dataIndex] = noData;
			}
			else 
				data[dataIndex] = noData;				
		}
	}
	
	bson *p = (bson*)malloc(sizeof(bson));
	bson_init(p);
	bson_append_string(p, "ID", remoteFilename.c_str());
	bson_append_double(p, "CELLSIZE", header["CELLSIZE"]);
	bson_append_double(p, "NODATA_VALUE", noData);
	bson_append_double(p, "NCOLS", cols);
	bson_append_double(p, "NROWS", rows);
	bson_append_double(p, "XLLCENTER", header["XLLCENTER"]);
	bson_append_double(p, "YLLCENTER", header["YLLCENTER"]);
	bson_finish(p);
		
	gridfile gfile[1];
	gridfile_writer_init(gfile, gfs, remoteFilename.c_str(), "float");

	size_t iID = remoteFilename.find_first_of('_'); 
	int subbasinID = atoi(remoteFilename.substr(0, iID).c_str());
	gfile->id.ints[0] = subbasinID;
	
	for (int k = 0; k < rows; k++)
	{
		gridfile_write_buffer(gfile, (const char*)(data+cols*k), sizeof(float)*cols);
	}
	gridfile_set_metadata(gfile, p);
	int response = gridfile_writer_done(gfile);
	//cout << remoteFilename << "\t" << gfile->id.ints[0] <<  gfile->id.ints[1] << gfile->id.ints[2] << endl;

	gridfile_destroy(gfile);	
	bson_destroy(p);
	free(p);
	
	delete[] data;
}
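Example #7 only writes the raster; the reverse direction is not shown there. The sketch below reads such a file back row by row, under the assumption that the grid dimensions are known to the caller. It uses only calls that appear elsewhere on this page (gridfs_find_filename, gridfile_get_contentlength, gridfile_read_buffer, gridfile_destroy); read_raster is an illustrative name, and older driver revisions such as the one Example #7 targets (whose gridfile_writer_init takes no flags argument) may name the read call differently.

#include "gridfs.h"

/* Sketch: read a raster stored as rows of floats back out of GridFS.
 * Returns 0 on success, -1 if the file is missing or has an unexpected size. */
static int read_raster( gridfs *gfs, const char *remoteFilename,
                        int rows, int cols, float *out /* rows*cols floats */ ) {
    gridfile gfile[1];
    int k;

    if ( gridfs_find_filename( gfs, remoteFilename, gfile ) != 0 )
        return -1;

    /* the stored length must match the expected raster size */
    if ( gridfile_get_contentlength( gfile ) != (gridfs_offset)( sizeof(float) * (size_t)rows * cols ) ) {
        gridfile_destroy( gfile );
        return -1;
    }

    for ( k = 0; k < rows; k++ )   /* one row per read, mirroring the write loop in Example #7 */
        gridfile_read_buffer( gfile, (char*)( out + (size_t)k * cols ), sizeof(float) * cols );

    gridfile_destroy( gfile );
    return 0;
}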
Example #8
static
void test_large( void )
{
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    size_t i, n;
    char *buffer = (char*)bson_malloc( LARGE );
    char *read_buf = (char*)bson_malloc( LARGE );
    gridfs_offset filesize = ( int64_t )1024 * ( int64_t )LARGE;
    mongo_write_concern wc;    
    bson lastError;
    bson lastErrorCmd;
    
    srand( (unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;
    
    mongo_write_concern_init(&wc);
    wc.j = 1;
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    GFS_INIT;

    fd = fopen( "bigfile", "r" );
    if( fd ) {
      fclose( fd );
    } else {
      /* Create a very large file */
      fill_buffer_randomly( buffer, ( int64_t )LARGE );
      fd = fopen( "bigfile", "w" );
      for( i=0; i<1024; i++ ) {
        fwrite( buffer, 1, LARGE, fd );
      }
      fclose( fd );
    }

    /* Now read the file into GridFS */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_store_file( gfs, "bigfile", "bigfile", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS);

    gridfs_find_filename( gfs, "bigfile", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );
    
    fd = fopen( "bigfile", "r" );

    while( ( n = fread( buffer, 1, MEDIUM, fd ) ) != 0 ) {
      ASSERT( gridfile_read_buffer( gfile, read_buf, MEDIUM ) == n );
      ASSERT( memcmp( buffer, read_buf, n ) == 0 );
    }

    fclose( fd );
    gridfile_destroy( gfile );

    /* Now stream the local file into GridFS using the writer interface */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_remove_filename( gfs, "bigfile-stream" );
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS );

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 0; /* Let's reset write concern j field to zero, we will manually call getLastError with j = 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fd = fopen( "bigfile", "r" );
    i = 0;
    while( ( n = fread( buffer, 1, READ_WRITE_BUF_SIZE, fd ) ) != 0 ) {
        ASSERT( gridfile_write_buffer( gfile, buffer, n ) == n );     
        if(i++ % 10 == 0) {
          bson_init( &lastErrorCmd );
          bson_append_int( &lastErrorCmd, "getLastError", 1);
          bson_append_int( &lastErrorCmd, "j", 1);
          bson_finish( &lastErrorCmd );

          bson_init( &lastError );
          mongo_run_command( conn, "test", &lastErrorCmd, &lastError );

          bson_destroy( &lastError );
          bson_destroy( &lastErrorCmd );
        }
    }

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 1; /* Let's reset write concern j field to 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fclose( fd );
    gridfile_writer_done( gfile );

    gridfs_find_filename( gfs, "bigfile-stream", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );
    gridfs_remove_filename( gfs, "bigfile-stream" );

    gridfs_destroy( gfs );
    mongo_disconnect( conn );
    mongo_destroy( conn );

    bson_free( buffer );
    bson_free( read_buf );
    mongo_write_concern_destroy( &wc );
}
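Examples #8 through #10 compare the value returned by gridfile_write_buffer with the number of bytes passed in, which is the only per-call success signal the function gives. A tiny helper in the same spirit is sketched below; write_checked is an illustrative name, not part of the driver.

#include "gridfs.h"

/* Sketch: report a short write as an explicit error, mirroring the ASSERT checks above. */
static int write_checked( gridfile *gfile, const char *data, gridfs_offset len ) {
    return gridfile_write_buffer( gfile, data, len ) == len ? 0 : -1;
}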
Example #9
static
void test_random_write2( void ) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    bson meta;
    char *buf = (char*)bson_malloc( LARGE );
    char *zeroedbuf = (char*)bson_malloc( LARGE );
    int n;

    if( buf == NULL || zeroedbuf == NULL ) {
        printf( "Failed to allocate" );
        exit( 1 );
    }

    srand( 123 ); // Init with a predictable value

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;

    fill_buffer_randomly( buf, ( int64_t )LARGE );
    memset( zeroedbuf, 0, LARGE ); 

    bson_init_empty( &meta );

    GFS_INIT;

    /* In this portion of the test we write zeroes using the new gridfile_set_size API;
       gridfile_expand is exercised implicitly, since gridfile_set_size enlarges the file */
    gridfile_init( gfs, &meta, gfile );
    gridfile_writer_init( gfile, gfs, "random_access", "text/html", 0 );
    gridfile_set_size( gfile, LARGE ); // New API, this zero fills the file
    gridfile_writer_done( gfile );    
    test_gridfile( gfs, zeroedbuf, LARGE, "random_access", "text/html" ); // Test zero filled file

    /* In this portion of the test we again zero-fill the file with gridfile_set_size, then truncate it */
    gridfile_init( gfs, &meta, gfile );
    gridfile_writer_init( gfile, gfs, "random_access", "text/html", 0 );
    gridfile_set_size( gfile, LARGE ); // New API, this zero fills the file with LARGE bytes
    gridfile_truncate( gfile, LARGE / 2 ); // Let's truncate the file now
    gridfile_writer_done( gfile );    
    test_gridfile( gfs, zeroedbuf, LARGE / 2, "random_access", "text/html" ); // Test zero filled file truncated by half

    /* Let's re-create the file, now let's randomly write real data */
    gridfile_init( gfs, &meta, gfile );
    gridfile_writer_init( gfile, gfs, "random_access", "text/html", 0 );   
    gridfile_set_size( gfile, LARGE ); // We need to reserve LARGE bytes on file before writing backwards

    /* Here we write the file backwards with our random data, using a 3072-byte buffer so that
       writes straddle chunk boundaries (256K is not a multiple of 3072) and stress the buffering logic */
    for( n = LARGE / 3072 - 1; n >= 0; n-- ) {
        gridfile_seek( gfile, 3072 * n );
        ASSERT( gridfile_write_buffer( gfile, buf + ( n * 3072 ), 3072 ) == 3072 );
    }
    gridfile_writer_done( gfile );
    test_gridfile( gfs, buf, LARGE, "random_access", "text/html" );

    gridfs_destroy( gfs );
    mongo_destroy( conn );

    free( buf );
    free( zeroedbuf );
}
Example #10
static
void test_random_write(void) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile* gfile;
    char *data_before = (char*)bson_malloc( UPPER );
    char *random_data = (char*)bson_malloc( UPPER );
    char *buf = (char*) bson_malloc( UPPER );
    int64_t i;
    FILE *fd;

    srand((unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;
    GFS_INIT;

    fill_buffer_randomly( data_before, UPPER );
    fill_buffer_randomly( random_data, UPPER );
    for ( i = LOWER; i <= UPPER; i += DELTA ) {
        int64_t j = i / 2 - 3;
        gridfs_offset bytes_to_write_first;
        int n;

        /* Input from buffer */
        gridfs_store_buffer( gfs, data_before, i, "input-buffer", "text/html", GRIDFILE_DEFAULT );
        if ( i > DEFAULT_CHUNK_SIZE * 4 ) {
          n = DEFAULT_CHUNK_SIZE * 3 + 6;
          memcpy(&data_before[j], random_data, n); // Let's overwrite the buffer with bytes crossing multiple chunks
          bytes_to_write_first = 10;
        } else {
          n = 6;
          memcpy(random_data, "123456", n);
          strncpy(&data_before[j], random_data, n); // Let's overwrite the buffer with a few bytes
          bytes_to_write_first = 0;
        }
        gfile = gridfile_create();
        ASSERT(gridfs_find_filename(gfs, "input-buffer", gfile) == 0);
        gridfile_writer_init(gfile, gfs, "input-buffer", "text/html", GRIDFILE_DEFAULT );
        gridfile_seek(gfile, j); // Seek into the same buffer position within the GridFS file
        if ( bytes_to_write_first ) {
          ASSERT( gridfile_write_buffer(gfile, random_data, bytes_to_write_first) == bytes_to_write_first ); // Let's write 10 bytes first, and later the rest
        }
        ASSERT( gridfile_write_buffer(gfile, &random_data[bytes_to_write_first], n - bytes_to_write_first) == n - bytes_to_write_first ); // Try to write to the existing GridFS file on the position given by j
        gridfile_seek(gfile, j);
        gridfile_read_buffer( gfile, buf, n );
        ASSERT(memcmp( buf, &data_before[j], n) == 0);

        gridfile_writer_done(gfile);
        ASSERT(gfile->pos == (gridfs_offset)(j + n));
        gridfile_dealloc(gfile);
        test_gridfile( gfs, data_before, j + n > i ? j + n : i, "input-buffer", "text/html" );

        /* Input from file */
        fd = fopen( "input-file", "w" );
        fwrite( data_before, sizeof( char ), (size_t) (j + n > i ? j + n : i), fd );
        fclose( fd );
        gridfs_store_file( gfs, "input-file", "input-file", "text/html", GRIDFILE_DEFAULT );
        test_gridfile( gfs, data_before, j + n > i ? j + n : i, "input-file", "text/html" );
    }

    gridfs_destroy( gfs );
    mongo_disconnect( conn );
    mongo_destroy( conn );
    free( data_before );
    free( random_data );
    free( buf );

    /* Clean up files. */
    gridfs_test_unlink( "input-file" );
    gridfs_test_unlink( "output" );   
}
Example #11
SEXP mongo_gridfile_writer_write(SEXP gfw, SEXP raw) {
    gridfile* gfile = _checkGridfileWriter(gfw);
    int len = LENGTH(raw);
    if (len) gridfile_write_buffer(gfile, (char*)RAW(raw), len);
    return R_NilValue;
}