int test_insert_limits( const char *set_name ) {
    char version[10];
    mongo conn[1];
    mongo_write_concern wc[1];
    int i;
    char key[10];
    int res = 0;
    bson b[1], b2[1];
    bson *objs[2];

    mongo_write_concern_init( wc );
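    /* w = 1: acknowledged writes, so server-side errors are reported back to the driver. */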
    wc->w = 1;
    mongo_write_concern_finish( wc );

    /* We'll perform the full test if we're running v2.0 or later. */
    if( mongo_get_server_version( version ) != -1 && version[0] <= '1' )
        return 0;

    mongo_replset_init( conn, set_name );
    mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT + 1 );
    mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT );
    res = mongo_replset_connect( conn );

    if( res != MONGO_OK ) {
        res = conn->err;
        mongo_destroy( conn );
        return res;
    }

    ASSERT( conn->max_bson_size > MONGO_DEFAULT_MAX_BSON_SIZE );

    bson_init( b );
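    /* ~1.2 million int fields make this document larger than the server's max BSON size. */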
    for(i=0; i<1200000; i++) {
        sprintf( key, "%d", i + 10000000 );
        bson_append_int( b, key, i );
    }
    bson_finish( b );

    ASSERT( bson_size( b ) > conn->max_bson_size );

    ASSERT( mongo_insert( conn, "test.foo", b, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    mongo_clear_errors( conn );
    ASSERT( conn->err == 0 );

    bson_init( b2 );
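    /* b2 is small and valid; the batch insert below fails only because b is oversized. */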
    bson_append_int( b2, "foo", 1 );
    bson_finish( b2 );

    objs[0] = b;
    objs[1] = b2;

    ASSERT( mongo_insert_batch( conn, "test.foo", (const bson**)objs, 2, wc, 0 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    mongo_write_concern_destroy( wc );
    bson_destroy( b );
    bson_destroy( b2 );
    mongo_destroy( conn );

    return 0;
}
Example #2
static void test_write_concern_input( mongo *conn ) {
    mongo_write_concern wc[1], wcbad[1];
    bson b[1];

    mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL );

    bson_init( b );
    bson_append_new_oid( b, "_id" );
    bson_finish( b );
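    /* b has a fixed _id, so inserting it more than once triggers a duplicate-key error. */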

    mongo_write_concern_init( wc );    
    mongo_write_concern_set_w( wc, 1 );

    /* Failure to finish write concern object. */
    ASSERT( mongo_insert( conn, TEST_NS, b, wc ) != MONGO_OK );
    ASSERT( conn->err == MONGO_WRITE_CONCERN_INVALID );
    ASSERT_EQUAL_STRINGS( conn->errstr,
        "Must call mongo_write_concern_finish() before using *write_concern." );

    mongo_write_concern_finish( wc );

    /* Use a bad write concern. */
    mongo_clear_errors( conn );
    mongo_write_concern_init( wcbad );    
    mongo_write_concern_set_w( wcbad, 2 );
    mongo_write_concern_finish( wcbad );
    mongo_set_write_concern( conn, wcbad );
    ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) != MONGO_OK );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "norepl" );

    /* Ensure that supplied write concern overrides default. */
    mongo_clear_errors( conn );
    ASSERT( mongo_insert( conn, TEST_NS, b, wc ) != MONGO_OK );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" );
    ASSERT( conn->lasterrcode == 11000 );

    conn->write_concern = NULL;
    mongo_write_concern_destroy( wc );
    mongo_write_concern_destroy( wcbad );
    bson_destroy( b );
}
Example #3
static
void test_large( void )
{
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    size_t i, n;
    char *buffer = (char*)bson_malloc( LARGE );
    char *read_buf = (char*)bson_malloc( LARGE );
    gridfs_offset filesize = ( int64_t )1024 * ( int64_t )LARGE;
    mongo_write_concern wc;    
    bson lastError;
    bson lastErrorCmd;
    
    srand( (unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;
    
    mongo_write_concern_init(&wc);
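    /* j = 1: block each write until it has been committed to the journal. */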
    wc.j = 1;
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    GFS_INIT;

    fd = fopen( "bigfile", "r" );
    if( fd ) {
      fclose( fd );
    } else {
      /* Create a very large file */
      fill_buffer_randomly( buffer, ( int64_t )LARGE );
      fd = fopen( "bigfile", "w" );
      for( i=0; i<1024; i++ ) {
        fwrite( buffer, 1, LARGE, fd );
      }
      fclose( fd );
    }

    /* Now store the file in GridFS and read it back */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_store_file( gfs, "bigfile", "bigfile", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS);

    gridfs_find_filename( gfs, "bigfile", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );
    
    fd = fopen( "bigfile", "r" );

    while( ( n = fread( buffer, 1, MEDIUM, fd ) ) != 0 ) {
      ASSERT( gridfile_read_buffer( gfile, read_buf, MEDIUM ) == n );
      ASSERT( memcmp( buffer, read_buf, n ) == 0 );
    }

    fclose( fd );
    gridfile_destroy( gfile );

    /* Now write the file to GridFS using the streaming (writer) interface */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_remove_filename( gfs, "bigfile-stream" );
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS );

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 0; /* Let's reset write concern j field to zero, we will manually call getLastError with j = 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fd = fopen( "bigfile", "r" );
    i = 0;
    while( ( n = fread( buffer, 1, READ_WRITE_BUF_SIZE, fd ) ) != 0 ) {
        ASSERT( gridfile_write_buffer( gfile, buffer, n ) == n );     
        if(i++ % 10 == 0) {
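          /* Every 10th buffer, run getLastError with j:1 by hand to force a journal commit. */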
          bson_init( &lastErrorCmd );
          bson_append_int( &lastErrorCmd, "getLastError", 1);
          bson_append_int( &lastErrorCmd, "j", 1);
          bson_finish( &lastErrorCmd );

          bson_init( &lastError );
          mongo_run_command( conn, "test", &lastErrorCmd, &lastError );

          bson_destroy( &lastError );
          bson_destroy( &lastErrorCmd );
        }
    }

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 1; /* Let's reset write concern j field to 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fclose( fd );
    gridfile_writer_done( gfile );

    gridfs_find_filename( gfs, "bigfile-stream", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );
    gridfs_remove_filename( gfs, "bigfile-stream" );

    gridfs_destroy( gfs );
    mongo_disconnect( conn );
    mongo_destroy( conn );

    bson_free( buffer );
    bson_free( read_buf );
    mongo_write_concern_destroy( &wc );
}
Example #4
static void test_insert( mongo *conn ) {
    mongo_write_concern wc0[1], wc1[1];
    bson b[1], b2[1], b3[1], b4[1];
    bson *objs[2];

    mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL );

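    /* wc0: unacknowledged writes (w = 0); wc1: acknowledged writes (w = 1). */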
    mongo_write_concern_init( wc0 );    
    mongo_write_concern_set_w( wc0, 0 );
    mongo_write_concern_finish( wc0 );
    mongo_write_concern_init( wc1 );    
    mongo_write_concern_set_w( wc1, 1 );
    mongo_write_concern_finish( wc1 );

    bson_init( b4 );
    bson_append_string( b4, "foo", "bar" );
    bson_finish( b4 );

    ASSERT( mongo_insert( conn, TEST_NS, b4, wc1 ) == MONGO_OK );

    ASSERT( mongo_remove( conn, TEST_NS, bson_shared_empty( ), wc1 ) == MONGO_OK );

    bson_init( b );
    bson_append_new_oid( b, "_id" );
    bson_finish( b );
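    /* b has a fixed _id, so every insert of it after the first is a duplicate-key error. */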

    ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) == MONGO_OK );

    /* This fails but returns OK with write concern w = 0 */
    ASSERT( mongo_insert( conn, TEST_NS, b, wc0 ) == MONGO_OK ); /* no getLastError request */

    ASSERT( mongo_insert( conn, TEST_NS, b, wc1 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" );
    ASSERT( conn->lasterrcode == 11000 );
    mongo_clear_errors( conn );

    /* Still fails but returns OK with write concern w = 0 */
    ASSERT( mongo_insert( conn, TEST_NS, b, wc0 ) == MONGO_OK );

    /* But not when we set a default write concern on the conn. */
    mongo_set_write_concern( conn, wc1 );
    ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) != MONGO_OK );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" );
    ASSERT( conn->lasterrcode == 11000 );

    /* Now test batch insert. */
    bson_init( b2 );
    bson_append_new_oid( b2, "_id" );
    bson_finish( b2 );

    bson_init( b3 );
    bson_append_new_oid( b3, "_id" );
    bson_finish( b3 );

    objs[0] = b2;
    objs[1] = b3;

    /* Insert two new documents by insert_batch. */
    conn->write_concern = NULL;
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_shared_empty( ) ) == 1 );
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, NULL, 0 ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_shared_empty( ) ) == 3 );

    /* This should definitely fail if we try again with write concern. */
    mongo_clear_errors( conn );
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, wc1, 0 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" );
    ASSERT( conn->lasterrcode == 11000 );

    /* But it will succeed without the write concern set. */
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, NULL, 0 ) == MONGO_OK );

    bson_destroy( b );
    bson_destroy( b2 );
    bson_destroy( b3 );
    bson_destroy( b4 );
    mongo_write_concern_destroy( wc0 );
    mongo_write_concern_destroy( wc1 );
}
Example #5
/* We can test write concern for update
 * and remove by doing operations on a capped collection. */
static void test_update_and_remove( mongo *conn ) {
    mongo_write_concern wc[1];
    bson *objs[5];
    bson query[1], update[1];
    int i;

    create_capped_collection( conn );

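    /* Seed the capped collection with five small documents. */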
    for( i=0; i<5; i++ ) {
        objs[i] = bson_alloc();
        bson_init( objs[i] );
        bson_append_int( objs[i], "n", i );
        bson_finish( objs[i] );
    }

    ASSERT( mongo_insert_batch( conn, "test.wc", (const bson **)objs, 5,
        NULL, 0 ) == MONGO_OK );

    ASSERT( mongo_count( conn, "test", "wc", bson_shared_empty( ) ) == 5 );

    bson_init( query );
    bson_append_int( query, "n", 2 );
    bson_finish( query );

    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    bson_init( update );
        bson_append_start_object( update, "$set" );
            bson_append_string( update, "n", "a big long string" );
        bson_append_finish_object( update );
    bson_finish( update );
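    /* Replacing the int with a long string would grow the document, which a capped
     * collection does not allow, so the update must fail under an acknowledged write concern. */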

    /* With no write concern, the update appears to succeed but is never actually applied. */
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );
    ASSERT( mongo_update( conn, "test.wc", query, update, 0, NULL ) == MONGO_OK );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    /* With no write concern, the remove appears to succeed but nothing is actually removed. */
    ASSERT( mongo_remove( conn, "test.wc", query, NULL ) == MONGO_OK );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    mongo_write_concern_init( wc );    
    mongo_write_concern_set_w( wc, 1 );
    mongo_write_concern_finish( wc );

    mongo_clear_errors( conn );
    ASSERT( mongo_update( conn, "test.wc", query, update, 0, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "failing update: objects in a capped ns cannot grow" );

    mongo_clear_errors( conn );
    ASSERT( mongo_remove( conn, "test.wc", query, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "can't remove from a capped collection" );

    mongo_write_concern_destroy( wc );
    bson_destroy( query );
    bson_destroy( update );
    for( i=0; i<5; i++ ) {
        bson_destroy( objs[i] );
        bson_dealloc( objs[i] );
    }
}