void test_batch_insert_with_continue( mongo *conn ) {
    bson *objs[5];
    bson *objs2[5];
    bson empty;
    int i;

    mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL );
    mongo_create_simple_index( conn, TEST_NS, "n", MONGO_INDEX_UNIQUE, NULL );
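    /* The unique index on "n" created above is what makes the duplicate
     * insert attempts below fail on the server. */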

    for( i=0; i<5; i++ ) {
        objs[i] = bson_malloc( sizeof( bson ) );
        bson_init( objs[i] );
        bson_append_int( objs[i], "n", i );
        bson_finish( objs[i] );
    }

    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 5,
        NULL, 0 ) == MONGO_OK );

    ASSERT( mongo_count( conn, TEST_DB, TEST_COL,
          bson_empty( &empty ) ) == 5 );

    /* Add one duplicate value for n. */
    objs2[0] = bson_malloc( sizeof( bson ) );
    bson_init( objs2[0] );
    bson_append_int( objs2[0], "n", 1 );
    bson_finish( objs2[0] );

    /* Add n for 6 - 9. */
    for( i = 1; i < 5; i++ ) {
        objs2[i] = bson_malloc( sizeof( bson ) );
        bson_init( objs2[i] );
        bson_append_int( objs2[i], "n", i + 5 );
        bson_finish( objs2[i] );
    }

    /* Without continue on error, the server stops at the first (duplicate)
     * document, so nothing new is inserted; with no write concern passed,
     * the call still returns MONGO_OK. */
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs2, 5,
        NULL, 0 ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL,
          bson_empty( &empty ) ) == 5 );

    /* With continue on error, will insert four documents. */
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs2, 5,
        NULL, MONGO_CONTINUE_ON_ERROR ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL,
          bson_empty( &empty ) ) == 9 );

    for( i=0; i<5; i++ ) {
        bson_destroy( objs2[i] );
        bson_free( objs2[i] );

        bson_destroy( objs[i] );
        bson_free( objs[i] );
    }
}
Example #2
static void tutorial_insert_batch( mongo *conn ) {
  bson *p;
  bson **ps;
  char *names[4];
  int ages[] = { 29, 24, 24, 32 };
  int i, n = 4;
  names[0] = "Eliot"; names[1] = "Mike"; names[2] = "Mathias"; names[3] = "Richard";

  ps = ( bson ** )malloc( sizeof( bson * ) * n);

  for ( i = 0; i < n; i++ ) {
    p = ( bson * )malloc( sizeof( bson ) );
    bson_init( p );
    bson_append_new_oid( p, "_id" );
    bson_append_string( p, "name", names[i] );
    bson_append_int( p, "age", ages[i] );
    bson_finish( p );
    ps[i] = p;
  }

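  /* No write concern is passed here (0), so the insert is fire-and-forget
     and any server-side errors are not reported back. */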
  mongo_insert_batch( conn, "tutorial.persons", (const bson **) ps, n, 0, 0 );

  for ( i = 0; i < n; i++ ) {
    bson_destroy( ps[i] );
    free( ps[i] );
  }
  free( ps );
}
Example #3
SEXP rmongo_insert_batch(SEXP mongo_conn, SEXP ns, SEXP b) {
    mongo* conn = _checkMongo(mongo_conn);
    const char* _ns = CHAR(STRING_ELT(ns, 0));
    SEXP ret;
    PROTECT(ret = allocVector(LGLSXP, 1));
    if (TYPEOF(b) != VECSXP)
        error("Expected a list of mongo.bson class objects");
    int len = LENGTH(b);
    bson** blist = Calloc(len, bson*);
    int i;
    int success = 1;
    for (i = 0; i < len && success; i++) {
        SEXP _b = VECTOR_ELT(b, i);
        if (!_isBSON(_b))
            success = 0;
        else
            blist[i] = _checkBSON(_b);
    }
    if (success)
        LOGICAL(ret)[0] = (mongo_insert_batch(conn, _ns, (const bson **)blist, len, NULL, 0) == MONGO_OK);
    Free(blist);
    if (!success)
        error("Expected list of mongo.bson class objects");
    UNPROTECT(1);
    return ret;
}
Example #4
int test_namespace_validation_on_insert( void ) {
    mongo conn[1];
    bson b[1], b2[1];
    bson *objs[2];

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    bson_init( b );
    bson_append_int( b, "foo", 1 );
    bson_finish( b );

    ASSERT( mongo_insert( conn, "tet.fo$o", b, NULL ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_NS_INVALID );
    ASSERT( strncmp( conn->errstr, "Collection may not contain '$'", 29 ) == 0 );
    mongo_clear_errors( conn );

    bson_init( b2 );
    bson_append_int( b2, "foo", 1 );
    bson_finish( b2 );

    objs[0] = b;
    objs[1] = b2;

    ASSERT( mongo_insert_batch( conn, "tet.fo$o",
          (const bson **)objs, 2, NULL, 0 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_NS_INVALID );
    ASSERT( strncmp( conn->errstr, "Collection may not contain '$'", 29 ) == 0 );

    bson_destroy( b );
    bson_destroy( b2 );
    mongo_destroy( conn );

    return 0;
}
Example #5
int test_insert_limits( const char *set_name ) {
    char version[10];
    mongo conn[1];
    mongo_write_concern wc[1];
    int i;
    char key[10];
    int res = 0;
    bson b[1], b2[1];
    bson *objs[2];

    mongo_write_concern_init( wc );
    wc->w = 1;
    mongo_write_concern_finish( wc );

    /* We'll perform the full test only if we're running v2.0 or later. */
    if( mongo_get_server_version( version ) != -1 && version[0] <= '1' ) {
        mongo_write_concern_destroy( wc );
        return 0;
    }

    mongo_replset_init( conn, set_name );
    mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT + 1 );
    mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT );
    res = mongo_replset_connect( conn );

    if( res != MONGO_OK ) {
        res = conn->err;
        mongo_write_concern_destroy( wc );
        return res;
    }

    ASSERT( conn->max_bson_size > MONGO_DEFAULT_MAX_BSON_SIZE );

    bson_init( b );
    for(i=0; i<1200000; i++) {
        sprintf( key, "%d", i + 10000000 );
        bson_append_int( b, key, i );
    }
    bson_finish( b );

    ASSERT( bson_size( b ) > conn->max_bson_size );

    ASSERT( mongo_insert( conn, "test.foo", b, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    mongo_clear_errors( conn );
    ASSERT( conn->err == 0 );

    bson_init( b2 );
    bson_append_int( b2, "foo", 1 );
    bson_finish( b2 );

    objs[0] = b;
    objs[1] = b2;

    ASSERT( mongo_insert_batch( conn, "test.foo", (const bson**)objs, 2, wc, 0 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    bson_destroy( b );
    bson_destroy( b2 );
    mongo_write_concern_destroy( wc );
    mongo_destroy( conn );

    return 0;
}
Example #6
result_t MongoCollection::_batchInsert(std::vector<const bson*> pdata, int num, int32_t& retVal, AsyncEvent* ac)
{
    if (ac->isSync())
        return CHECK_ERROR(CALL_E_NOSYNC);

    obj_ptr<MongoDB> db(m_db);
    if (!db)
        return CHECK_ERROR(CALL_E_INVALID_CALL);

    retVal = mongo_insert_batch(db->m_conn, m_ns.c_str(), pdata.data(), num, NULL, 0);
    return 0;
}
Example #7
int test_insert_limits( void ) {
    char version[10];
    mongo conn[1];
    int i;
    char key[10];
    bson b[1], b2[1];
    bson *objs[2];

    /* Test the default max BSON size. */
    mongo_init( conn );
    ASSERT( conn->max_bson_size == MONGO_DEFAULT_MAX_BSON_SIZE );

    /* We'll perform the full test if we're running v2.0 or later. */
    if( mongo_get_server_version( version ) != -1 && version[0] <= '1' )
        return 0;

    if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    ASSERT( conn->max_bson_size > MONGO_DEFAULT_MAX_BSON_SIZE );

    bson_init( b );
    for(i=0; i<1200000; i++) {
        sprintf( key, "%d", i + 10000000 );
        bson_append_int( b, key, i );
    }
    bson_finish( b );

    ASSERT( bson_size( b ) > conn->max_bson_size );

    ASSERT( mongo_insert( conn, "test.foo", b, NULL ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    mongo_clear_errors( conn );
    ASSERT( conn->err == 0 );

    bson_init( b2 );
    bson_append_int( b2, "foo", 1 );
    bson_finish( b2 );

    objs[0] = b;
    objs[1] = b2;

    ASSERT( mongo_insert_batch( conn, "test.foo", (const bson **)objs, 2,
          NULL, 0 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    bson_destroy( b );
    bson_destroy( b2 );
    mongo_destroy( conn );

    return 0;
}
Example #8
static void batch_insert_large_test( void ) {
    int i, j;
    bson b[BATCH_SIZE];
    const bson *bp[BATCH_SIZE];
    for ( j=0; j < BATCH_SIZE; j++ )
        bp[j] = &b[j];

    for ( i=0; i < ( PER_TRIAL / BATCH_SIZE ); i++ ) {
        for ( j=0; j < BATCH_SIZE; j++ )
            make_large( &b[j], i );

        mongo_insert_batch( conn, DB ".batch.large", bp, BATCH_SIZE, NULL, 0 );

        for ( j=0; j < BATCH_SIZE; j++ )
            bson_destroy( &b[j] );
    }
}
Example #9
static void batch_insert_medium_test() {
    int i, j;
    bson b[BATCH_SIZE];
    const bson *bp[BATCH_SIZE];
    for ( j=0; j < BATCH_SIZE; j++ )
        bp[j] = &b[j];

    for ( i=0; i < ( PER_TRIAL / BATCH_SIZE ); i++ ) {
        for ( j=0; j < BATCH_SIZE; j++ )
            make_medium( &b[j], i );

        mongo_insert_batch( conn, DB ".batch.medium", bp, BATCH_SIZE, NULL, 0 );

        for ( j=0; j < BATCH_SIZE; j++ )
            bson_destroy( &b[j] );
    }
}
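
The make_medium() helper is defined elsewhere in the benchmark suite. A minimal sketch of the kind of document such a helper might build (the field names and values below are illustrative assumptions, not the original definition; make_large() presumably follows the same pattern with a much larger payload field):

static void make_medium( bson *out, int i ) {
    bson_init( out );
    bson_append_new_oid( out, "_id" );
    bson_append_int( out, "x", i );
    bson_append_double( out, "number", 5.05 );   /* assumed filler fields */
    bson_append_bool( out, "boolean", 0 );
    bson_append_start_array( out, "tags" );
        bson_append_string( out, "0", "benchmark" );
        bson_append_string( out, "1", "medium" );
    bson_append_finish_object( out );
    bson_finish( out );
}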
Example #10
int main() {
    mongo conn[1];
    bson b, empty;
    mongo_cursor cursor[1];
    unsigned char not_utf8[3];
    int result = 0;
    const char *ns = "test.c.validate";

    int i=0, j=0;
    bson bs[BATCH_SIZE];
    bson *bp[BATCH_SIZE];

    not_utf8[0] = 0xC0;
    not_utf8[1] = 0xC0;
    not_utf8[2] = '\0';
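    /* 0xC0 0xC0 is not a valid UTF-8 byte sequence; it is used below to
       exercise the driver's UTF-8 validation. */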

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    /* Test checking for finished bson. */
    bson_init( &b );
    bson_append_int( &b, "foo", 1 );
    ASSERT( mongo_insert( conn, "test.foo", &b, NULL ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_NOT_FINISHED );
    bson_destroy( &b );

    /* Test valid keys. */
    bson_init( &b );
    result = bson_append_string( &b , "a.b" , "17" );
    ASSERT( result == BSON_OK );

    ASSERT( b.err & BSON_FIELD_HAS_DOT );

    /* Don't set the BSON_FIELD_INIT_DOLLAR flag when DB ref fields ($id, $ref, $db) are used. */
    result = bson_append_string( &b , "$id" , "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) );

    result = bson_append_string( &b , "$ref" , "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) );

    result = bson_append_string( &b , "$db" , "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) );

    result = bson_append_string( &b , "$ab" , "17" );
    ASSERT( result == BSON_OK );
    ASSERT( b.err & BSON_FIELD_INIT_DOLLAR );

    result = bson_append_string( &b , "ab" , "this is valid utf8" );
    ASSERT( result == BSON_OK );
    ASSERT( ! ( b.err & BSON_NOT_UTF8 ) );

    result = bson_append_string( &b , ( const char * )not_utf8, "valid" );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    ASSERT( bson_finish( &b ) == BSON_ERROR );
    ASSERT( b.err & BSON_FIELD_HAS_DOT );
    ASSERT( b.err & BSON_FIELD_INIT_DOLLAR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    result = mongo_insert( conn, ns, &b, NULL );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err & MONGO_BSON_NOT_FINISHED );

    result = mongo_update( conn, ns, bson_empty( &empty ), &b, 0, NULL );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err & MONGO_BSON_NOT_FINISHED );

    mongo_cursor_init( cursor, conn, "test.cursors" );
    mongo_cursor_set_query( cursor, &b );
    result = mongo_cursor_next( cursor );
    ASSERT( result == MONGO_ERROR );
    ASSERT( cursor->err & MONGO_CURSOR_BSON_ERROR );
    ASSERT( cursor->conn->err & MONGO_BSON_NOT_FINISHED );

    bson_destroy( &b );
    mongo_cursor_destroy( cursor );

    /* Test valid strings. */
    bson_init( &b );
    result = bson_append_string( &b , "foo" , "bar" );
    ASSERT( result == BSON_OK );
    ASSERT( b.err == 0 );

    result = bson_append_string( &b , "foo" , ( const char * )not_utf8 );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    b.err = 0;
    ASSERT( b.err == 0 );

    result = bson_append_regex( &b , "foo" , ( const char * )not_utf8, "s" );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    for ( j=0; j < BATCH_SIZE; j++ )
        bp[j] = &bs[j];

    for ( j=0; j < BATCH_SIZE; j++ )
        make_small_invalid( &bs[j], i );

    result = mongo_insert_batch( conn, ns, (const bson **)bp, BATCH_SIZE, NULL, 0 );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_INVALID );

    for ( j=0; j < BATCH_SIZE; j++ )
        bson_destroy( &bs[j] );

    bson_destroy( &b );
    mongo_cmd_drop_db( conn, "test" );
    mongo_disconnect( conn );

    mongo_destroy( conn );

    return 0;
}
Example #11
static void test_insert( mongo *conn ) {
    mongo_write_concern wc0[1], wc1[1];
    bson b[1], b2[1], b3[1], b4[1];
    bson *objs[2];

    mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL );

    mongo_write_concern_init( wc0 );    
    mongo_write_concern_set_w( wc0, 0 );
    mongo_write_concern_finish( wc0 );
    mongo_write_concern_init( wc1 );    
    mongo_write_concern_set_w( wc1, 1 );
    mongo_write_concern_finish( wc1 );

    bson_init( b4 );
    bson_append_string( b4, "foo", "bar" );
    bson_finish( b4 );

    ASSERT( mongo_insert( conn, TEST_NS, b4, wc1 ) == MONGO_OK );

    ASSERT( mongo_remove( conn, TEST_NS, bson_shared_empty( ), wc1 ) == MONGO_OK );

    bson_init( b );
    bson_append_new_oid( b, "_id" );
    bson_finish( b );

    ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) == MONGO_OK );

    /* This fails but returns OK with write concern w = 0 */
    ASSERT( mongo_insert( conn, TEST_NS, b, wc0 ) == MONGO_OK ); /* no getLastError request */

    ASSERT( mongo_insert( conn, TEST_NS, b, wc1 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" );
    ASSERT( conn->lasterrcode == 11000 );
    mongo_clear_errors( conn );

    /* Still fails but returns OK with write concern w = 0 */
    ASSERT( mongo_insert( conn, TEST_NS, b, wc0 ) == MONGO_OK );

    /* But not when we set a default write concern on the conn. */
    mongo_set_write_concern( conn, wc1 );
    ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) != MONGO_OK );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" );
    ASSERT( conn->lasterrcode == 11000 );

    /* Now test batch insert. */
    bson_init( b2 );
    bson_append_new_oid( b2, "_id" );
    bson_finish( b2 );

    bson_init( b3 );
    bson_append_new_oid( b3, "_id" );
    bson_finish( b3 );

    objs[0] = b2;
    objs[1] = b3;

    /* Insert two new documents by insert_batch. */
    conn->write_concern = NULL;
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_shared_empty( ) ) == 1 );
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, NULL, 0 ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_shared_empty( ) ) == 3 );

    /* This should definitely fail if we try again with write concern. */
    mongo_clear_errors( conn );
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, wc1, 0 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" );
    ASSERT( conn->lasterrcode == 11000 );

    /* But it will succeed without the write concern set. */
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, NULL, 0 ) == MONGO_OK );

    bson_destroy( b );
    bson_destroy( b2 );
    bson_destroy( b3 );
    bson_destroy( b4 );
    mongo_write_concern_destroy( wc0 );
    mongo_write_concern_destroy( wc1 );
}
Example #12
/* We can test write concern for update
 * and remove by doing operations on a capped collection. */
static void test_update_and_remove( mongo *conn ) {
    mongo_write_concern wc[1];
    bson *objs[5];
    bson query[1], update[1];
    int i;

    create_capped_collection( conn );

    for( i=0; i<5; i++ ) {
        objs[i] = bson_alloc();
        bson_init( objs[i] );
        bson_append_int( objs[i], "n", i );
        bson_finish( objs[i] );
    }

    ASSERT( mongo_insert_batch( conn, "test.wc", (const bson **)objs, 5,
        NULL, 0 ) == MONGO_OK );

    ASSERT( mongo_count( conn, "test", "wc", bson_shared_empty( ) ) == 5 );

    bson_init( query );
    bson_append_int( query, "n", 2 );
    bson_finish( query );

    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    bson_init( update );
        bson_append_start_object( update, "$set" );
            bson_append_string( update, "n", "a big long string" );
        bson_append_finish_object( update );
    bson_finish( update );

    /* Update will appear to succeed with no write concern specified, but doesn't. */
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );
    ASSERT( mongo_update( conn, "test.wc", query, update, 0, NULL ) == MONGO_OK );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    /* Remove will appear to succeed with no write concern specified, but doesn't. */
    ASSERT( mongo_remove( conn, "test.wc", query, NULL ) == MONGO_OK );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    mongo_write_concern_init( wc );    
    mongo_write_concern_set_w( wc, 1 );
    mongo_write_concern_finish( wc );

    mongo_clear_errors( conn );
    ASSERT( mongo_update( conn, "test.wc", query, update, 0, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "failing update: objects in a capped ns cannot grow" );

    mongo_clear_errors( conn );
    ASSERT( mongo_remove( conn, "test.wc", query, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "can't remove from a capped collection" );

    mongo_write_concern_destroy( wc );
    bson_destroy( query );
    bson_destroy( update );
    for( i=0; i<5; i++ ) {
        bson_destroy( objs[i] );
        bson_dealloc( objs[i] );
    }
}
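
The create_capped_collection() helper referenced above isn't shown. A minimal sketch, assuming the driver's generic mongo_run_command() and the test.wc collection used in the test (the 1,000,000-byte cap is an arbitrary assumption):

static void create_capped_collection( mongo *conn ) {
    bson cmd[1], out[1];

    /* Start from a clean slate. */
    mongo_cmd_drop_collection( conn, "test", "wc", NULL );

    /* Equivalent to running { create: "wc", capped: true, size: 1000000 }. */
    bson_init( cmd );
    bson_append_string( cmd, "create", "wc" );
    bson_append_bool( cmd, "capped", 1 );
    bson_append_int( cmd, "size", 1000000 );
    bson_finish( cmd );

    ASSERT( mongo_run_command( conn, "test", cmd, out ) == MONGO_OK );

    bson_destroy( out );
    bson_destroy( cmd );
}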
Example #13
int flush_to_mongo(st_http_request * p, int counter)
{
    mongo conn;
    mongo_init(&conn);
    mongo_set_op_timeout(&conn, 10000);
    int status = mongo_client(&conn, globalArgs.host, globalArgs.port);
    if (status != MONGO_OK) {
	switch (conn.err) {
	case MONGO_CONN_SUCCESS:
	    printf("Connected to mongo\n");
	    break;
	case MONGO_CONN_NO_SOCKET:
	    printf("FAIL: Could not create a socket!\n");
	    break;
	case MONGO_CONN_ADDR_FAIL:
	    printf("FAIL: MONGO_CONN_ADDR_FAIL: %s\n", globalArgs.host);
	    break;
	case MONGO_CONN_NOT_MASTER:
	    printf("FAIL: MONGO_CONN_NOT_MASTER\n");
	    break;
	case MONGO_CONN_BAD_SET_NAME:
	    printf("FAIL: MONGO_CONN_BAD_SET_NAME\n");
	    break;
	case MONGO_CONN_NO_PRIMARY:
	    printf("FAIL: MONGO_CONN_NO_PRIMARY\n");
	    break;
	case MONGO_IO_ERROR:
	    printf("FAIL: MONGO_IO_ERROR\n");
	    break;
	case MONGO_SOCKET_ERROR:
	    printf("FAIL: MONGO_SOCKET_ERROR\n");
	    break;
	case MONGO_READ_SIZE_ERROR:
	    printf("FAIL: MONGO_READ_SIZE_ERROR\n");
	    break;
	case MONGO_COMMAND_FAILED:
	    printf("FAIL: MONGO_COMMAND_FAILED\n");
	    break;
	case MONGO_WRITE_ERROR:
	    printf("FAIL: MONGO_WRITE_ERROR\n");
	    break;
	case MONGO_NS_INVALID:
	    printf("FAIL: MONGO_NS_INVALID\n");
	    break;
	case MONGO_BSON_INVALID:
	    printf("FAIL: MONGO_BSON_INVALIDr\n");
	    break;
	case MONGO_BSON_NOT_FINISHED:
	    printf("FAIL: MONGO_BSON_NOT_FINISHED\n");
	    break;
	case MONGO_BSON_TOO_LARGE:
	    printf("FAIL: MONGO_BSON_TOO_LARGEr\n");
	    break;
	case MONGO_WRITE_CONCERN_INVALID:
	    printf("FAIL: Mongo write concern invalid\n");
	    break;
	case MONGO_CONN_FAIL:
	    printf
		("FAIL: Mongo connection fail. Make sure it's listening at %s:%d\n",
		 globalArgs.host, globalArgs.port);
	    break;
	}

	exit(EXIT_FAILURE);
    }

    bson **bps;
    bps = (bson **) malloc(sizeof(bson *) * counter);

    char *fields = globalArgs.fields;
    int i = 0;
    for (i = 0; i < counter; i++) {
	bson *bp = (bson *) malloc(sizeof(bson));
	bson_init(bp);
	bson_append_new_oid(bp, "_id");
	if (fields == NULL || strstr(fields, "req_ip") != NULL)
	    bson_append_string(bp, "req_ip", p[i].req_ip);
	if (fields == NULL || strstr(fields, "req_ident") != NULL)
	    bson_append_string(bp, "req_ident", p[i].req_ident);
	if (fields == NULL || strstr(fields, "req_user") != NULL)
	    bson_append_string(bp, "req_user", p[i].req_user);
	if (fields == NULL || strstr(fields, "req_datetime") != NULL)
	    bson_append_string(bp, "req_datetime", p[i].req_datetime);
	if (fields == NULL || strstr(fields, "req_method") != NULL)
	    bson_append_string(bp, "req_method", p[i].req_method);
	if (fields == NULL || strstr(fields, "req_uri") != NULL)
	    bson_append_string(bp, "req_uri", p[i].req_uri);
	if (fields == NULL || strstr(fields, "req_proto") != NULL)
	    bson_append_string(bp, "req_proto", p[i].req_proto);
	if (fields == NULL || strstr(fields, "resp_code") != NULL)
	    bson_append_int(bp, "resp_code", p[i].resp_code);
	if (fields == NULL || strstr(fields, "resp_bytes") != NULL)
	    bson_append_int(bp, "resp_bytes", atoi(p[i].resp_bytes));
	if (fields == NULL || strstr(fields, "req_referer") != NULL)
	    bson_append_string(bp, "req_referer", p[i].req_referer);
	if (fields == NULL || strstr(fields, "req_agent") != NULL)
	    bson_append_string(bp, "req_agent", p[i].req_agent);
	bson_finish(bp);
	bps[i] = bp;
    }
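    /* Write concern is not set (0) and MONGO_CONTINUE_ON_ERROR is passed, so the
       server keeps inserting past individual failures and errors are not reported back. */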
    mongo_insert_batch(&conn, globalArgs.collection, (const bson **) bps,
		       counter, 0, MONGO_CONTINUE_ON_ERROR);

    mongo_destroy(&conn);

    int j = 0;
    for (j = 0; j < counter; j++) {
	bson_destroy(bps[j]);
	free(bps[j]);
    }
    free(bps);
    return (0);
}