Foo( int barr = initial_value_of_bar ) {
    my_bar = barr;
    // Simulated resource limit: refuse construction once the cap is hit.
    // The throw happens BEFORE the count is bumped, so only successfully
    // constructed objects are tracked.
    const bool limit_reached = MaxFooCount && FooCount >= MaxFooCount;
    if( limit_reached )
        __TBB_THROW( Foo_exception() );
    ++FooCount;
    state = DefaultInitialized;
}
 MyData(int i = 0) {
     my_state = LIVE;
     data = i;
     // Throw before counting so objects that fail construction are never
     // reflected in MyDataCount.
     const bool at_limit = MyDataCountLimit && (MyDataCount + 1 >= MyDataCountLimit);
     if( at_limit )
         __TBB_THROW( MyException() );
     MyDataCount++;
 }
 Foo( const Foo& foo ) {
     // Validate the source object BEFORE reading from it. The original code
     // copied foo.my_bar first and asserted afterwards, so a bad source was
     // consumed before the check fired.
     ASSERT( foo.is_valid_or_zero(), "bad source for copy" );
     my_bar = foo.my_bar;
     // Simulated resource limit: fail the copy before it is counted, so
     // FooCount only tracks fully constructed objects.
     if( MaxFooCount && FooCount >= MaxFooCount )
         __TBB_THROW( Foo_exception() );
     FooCount++;
     state = CopyInitialized;
 }
 MyData( const MyData& other ) {
     ASSERT( other.my_state==LIVE, NULL );
     data = other.data;
     my_state = LIVE;
     // Mimic resource exhaustion: refuse the copy once the configured limit
     // is reached; the live-object count is bumped only on success.
     const bool limit_hit = MyDataCountLimit && (MyDataCount + 1 >= MyDataCountLimit);
     if( limit_hit )
         __TBB_THROW( MyException() );
     MyDataCount++;
 }
// NOTE(review): the two lines below ("Esempio n. 5" / "0") are extraction
// artifacts from the code-search page this fragment was scraped from, not
// C++ — commented out so the file remains parseable.
// Esempio n. 5
// 0
// Reserve raw storage for at least n elements of element_size bytes.
// Throws std::length_error when the request exceeds max_size; storage is
// allocated segment by segment but elements are NOT constructed here.
void concurrent_vector_base::internal_reserve( size_type n, size_type element_size, size_type max_size ) {
    if( n>max_size )
        __TBB_THROW( std::length_error("argument to concurrent_vector::reserve exceeds concurrent_vector::max_size()") );
    // Starting from the first unallocated segment, keep allocating until the
    // combined segment capacity covers n elements.
    segment_index_t k = helper::find_segment_end(*this);
    while( segment_base(k)<n ) {
        helper::extend_segment_if_necessary(*this,k);
        // A non-null array here means another thread touched this segment.
        __TBB_ASSERT( !my_segment[k].array, "concurrent operation during reserve(...)?" );
        my_segment[k].array = NFS_Allocate( segment_size(k), element_size, NULL );
        ++k;
    }
}
 void operator() () const {
     Harness::ConcurrencyTracker ct;
     AssertLive();
     if ( !g_Throw ) {
         // Non-throwing mode: register this task, then spin until the task
         // group is cancelled by the task that raised the exception.
         ++g_TaskCount;
         while( !Concurrency::is_current_task_group_canceling() )
             __TBB_Yield();
         return;
     }
     // Throwing mode: let the first SKIP_CHORES-1 chores run, then raise
     // the test exception from the next one.
     if ( ++m_TaskCount == SKIP_CHORES )
         __TBB_THROW( test_exception(EXCEPTION_DESCR1) );
     __TBB_Yield();
 }