void run() { DurTransaction txn; create(); ASSERT_EQUALS( 2, nExtents() ); DiskLoc l[ 8 ]; for ( int i = 0; i < 8; ++i ) { StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigObj(), true ); ASSERT( status.isOK() ); l[ i ] = status.getValue(); ASSERT( !l[ i ].isNull() ); //ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() ); //if ( i > 3 ) // ASSERT( l[ i ] == l[ i - 4 ] ); } ASSERT( nRecords() == 8 ); // Too big BSONObjBuilder bob; bob.appendOID( "_id", NULL, true ); bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096 BSONObj bigger = bob.done(); StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigger, false ); ASSERT( !status.isOK() ); ASSERT_EQUALS( 0, nRecords() ); }
void run() { stringstream spec; spec << "{\"capped\":true,\"size\":2000,\"$nExtents\":" << nExtents() << "}"; string err; ASSERT( userCreateNS( ns(), fromjson( spec.str() ), err, false ) ); prepare(); int j = 0; for ( auto_ptr< Cursor > i = theDataFileMgr.findAll( ns() ); i->ok(); i->advance(), ++j ) ASSERT_EQUALS( j, i->current().firstElement().number() ); ASSERT_EQUALS( count(), j ); j = count() - 1; for ( auto_ptr< Cursor > i = findTableScan( ns(), fromjson( "{\"$natural\":-1}" ) ); i->ok(); i->advance(), --j ) ASSERT_EQUALS( j, i->current().firstElement().number() ); ASSERT_EQUALS( -1, j ); }
void run() { create(); ASSERT_EQUALS( 2, nExtents() ); BSONObj b = bigObj(); DiskLoc l[ 8 ]; for ( int i = 0; i < 8; ++i ) { l[ i ] = theDataFileMgr.insert( ns(), b.objdata(), b.objsize() ); ASSERT( !l[ i ].isNull() ); ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() ); if ( i > 3 ) ASSERT( l[ i ] == l[ i - 4 ] ); } // Too big BSONObjBuilder bob; bob.append( "a", string( 787, 'a' ) ); BSONObj bigger = bob.done(); ASSERT( theDataFileMgr.insert( ns(), bigger.objdata(), bigger.objsize() ).isNull() ); ASSERT_EQUALS( 0, nRecords() ); }
// Exercises temp_cappedTruncateAfter: fill the capped collection past wrap-around,
// remember the DiskLoc of the (N-4)th insert, truncate after it, and verify the
// scan endpoints. NOTE(review): parameter p is not used in this body — presumably
// a pass/variant selector used by the caller; confirm against the test harness.
void pass(int p) {
    DurTransaction txn;
    create();
    ASSERT_EQUALS( 2, nExtents() );
    BSONObj b = bigObj();
    // Enough inserts to cycle through all extents and wrap around, plus slack.
    int N = MinExtentSize / b.objsize() * nExtents() + 5;
    // Truncate after the insert made on iteration T (near the end of the run).
    int T = N - 4;
    DiskLoc truncAt;
    //DiskLoc l[ 8 ];
    for ( int i = 0; i < N; ++i ) {
        BSONObj bb = bigObj();
        StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bb, true );
        ASSERT( status.isOK() );
        DiskLoc a = status.getValue();
        // Capture the location of the T-th document as the truncation point.
        if( T == i )
            truncAt = a;
        ASSERT( !a.isNull() );
        /*ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
        if ( i > 3 )
            ASSERT( l[ i ] == l[ i - 4 ] );*/
    }
    // Capped wrap-around means older records were overwritten.
    ASSERT( nRecords() < N );
    DiskLoc last, first;
    {
        // Record the location of the newest (backward-scan-first) document.
        auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
                                                                collection(),
                                                                InternalPlanner::BACKWARD));
        runner->getNext(NULL, &last);
        ASSERT( !last.isNull() );
    }
    {
        // Record the location of the oldest (forward-scan-first) document.
        auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
                                                                collection(),
                                                                InternalPlanner::FORWARD));
        runner->getNext(NULL, &first);
        ASSERT( !first.isNull() );
        ASSERT( first != last ) ;
    }
    // Drop everything inserted after truncAt (inclusive=false keeps truncAt itself).
    collection()->temp_cappedTruncateAfter(&txn, truncAt, false);
    // NOTE(review): 28u is tied to bigObj()'s size vs MinExtentSize — verify if
    // either changes.
    ASSERT_EQUALS( collection()->numRecords() , 28u );
    {
        // The oldest document must survive the truncation.
        DiskLoc loc;
        auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
                                                                collection(),
                                                                InternalPlanner::FORWARD));
        runner->getNext(NULL, &loc);
        ASSERT( first == loc);
    }
    {
        // The previously-newest document must have been removed.
        auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
                                                                collection(),
                                                                InternalPlanner::BACKWARD));
        DiskLoc loc;
        runner->getNext(NULL, &loc);
        ASSERT( last != loc );
        ASSERT( !last.isNull() );
    }
    // Too big: an over-extent-sized document still fails to insert.
    BSONObjBuilder bob;
    bob.appendOID("_id", 0, true);
    bob.append( "a", string( MinExtentSize + 300, 'a' ) );
    BSONObj bigger = bob.done();
    StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigger, true );
    ASSERT( !status.isOK() );
    ASSERT_EQUALS( 0, nRecords() );
}