// Recursively generates a tree of child tasks while racing a sibling tree to
// hand tasks through the shared Exchanger slot.  This task recycles itself as
// one of its own continuation's children, so the exact ordering of
// allocate_continuation / set_ref_count / recycle_as_child_of / `return this`
// below is load-bearing — do not reorder.
/*override*/ tbb::task* execute() {
    if( my_depth>0 ) {
        int child_count = my_child_count;
        // Scheduler of the calling thread decides producer vs. consumer role below.
        scheduler* my_sched = internal::governor::local_scheduler();
        // Continuation that waits for child_count children (this task included).
        tbb::task& c = *new( allocate_continuation() ) tbb::empty_task;
        c.set_ref_count( child_count );
        // This task itself counts as one of the continuation's children.
        recycle_as_child_of(c);
        --child_count;
        if( Producer==my_sched ) { // produce a task and put it into Exchanger
            tbb::task* t = new( c.allocate_child() ) tbb::empty_task;
            --child_count;
            // Swap our task into the slot; spawn whatever previously occupied it.
            t = Exchanger.fetch_and_store(t);
            if( t ) spawn(*t);
        } else {
            // Consumer role: take whatever is currently in the slot and spawn it.
            tbb::task* t = Exchanger.fetch_and_store(NULL);
            if( t ) spawn(*t);
        }
        while( child_count ) {
            tbb::task* t = new( c.allocate_child() ) TaskGenerator(my_child_count, my_depth-1);
            // Deep subtrees go through enqueue(); shallow ones use plain spawn().
            if( my_depth >4 ) enqueue(*t);
            else spawn(*t);
            --child_count;
        }
        --my_depth;
        // Returning the recycled task lets the scheduler run it again as a child.
        return this;
    } else {
        // Leaf level: drain the Exchanger so no task is left stranded in the slot.
        tbb::task* t = Exchanger.fetch_and_store(NULL);
        if( t ) spawn(*t);
        return NULL;
    }
}
// Variant of the recursive task generator: children are spawned directly via
// the continuation instead of being enqueued.  As above, the task recycles
// itself as a child of its own continuation, so the ordering of
// allocate_continuation / set_ref_count / recycle_as_child_of / `return this`
// must be preserved exactly.
/*override*/ tbb::task* execute() {
    if( my_depth>0 ) {
        int child_count = my_child_count;
        // Thread-specific scheduler pointer selects producer vs. consumer role.
        scheduler* my_sched = GetThreadSpecific();
        // Continuation waiting on child_count children (this recycled task included).
        tbb::task& c = *new( tbb::task::allocate_continuation() ) tbb::empty_task;
        c.set_ref_count( child_count );
        recycle_as_child_of(c);
        --child_count;
        if( Producer==my_sched ) { // produce a task and put it into Exchanger
            tbb::task* t = new( c.allocate_child() ) tbb::empty_task;
            --child_count;
            // Swap our task into the slot; spawn whatever was there before.
            t = Exchanger.fetch_and_store(t);
            if( t ) this->spawn(*t);
        } else {
            // Consumer role: claim whatever currently sits in the slot.
            tbb::task* t = Exchanger.fetch_and_store(NULL);
            if( t ) this->spawn(*t);
        }
        while( child_count ) {
            // Spawn the remaining children of the continuation at one less depth.
            c.spawn( *new( c.allocate_child() ) TaskGenerator(my_child_count, my_depth-1) );
            --child_count;
        }
        --my_depth;
        // Returning the recycled task lets the scheduler execute it again as a child.
        return this;
    } else {
        // Leaf level: drain the Exchanger so nothing is left stranded in the slot.
        tbb::task* t = Exchanger.fetch_and_store(NULL);
        if( t ) this->spawn(*t);
        return NULL;
    }
}
// Waits (with a small, bounded poll loop) for the GPU fence to become ready,
// then emits a rate-limited performance warning when read-backs keep arriving
// late: the first warning goes out at "warning" level, all later ones at "trace".
void wait(ogl_device& ogl)
{
    int waited_ms = 0;

    // Fast path: one high-priority poll. If that misses, fall back to
    // normal-priority polls with a 2 ms sleep between them, for ~20 ms max.
    if(!ogl.invoke([this]{return ready();}, high_priority))
    {
        for(;;)
        {
            if(ogl.invoke([this]{return ready();}, normal_priority))
                break;
            if(waited_ms >= 20)
                break;
            waited_ms += 2;
            Sleep(2);
        }
    }

    static tbb::atomic<size_t> delayed_count;
    static tbb::atomic<bool>   already_warned;

    // Only complain once more than 50 noticeably-delayed read-backs have occurred.
    if(waited_ms > 2 && ++delayed_count > 50)
    {
        const bool first_warning = !already_warned.fetch_and_store(true);
        if(first_warning)
        {
            CASPAR_LOG(warning) << L"[fence] Performance warning. GPU was not ready during requested host read-back. Delayed by atleast: " << waited_ms << L" ms. Further warnings are sent to trace log level." << L" You can ignore this warning if you do not notice any problems with output video. This warning is caused by insufficent support or performance of your graphics card for OpenGL based memory transfers. " << L" Please try to update your graphics drivers or update your graphics card, see recommendations on (www.casparcg.com)." << L" Further help is available at (www.casparcg.com/forum).";
        }
        else
        {
            CASPAR_LOG(trace) << L"[fence] Performance warning. GPU was not ready during requested host read-back. Delayed by atleast: " << waited_ms << L" ms.";
        }
    }
}
// Verifies that observed task priorities change in a legal order: during
// startup they may only climb low -> [normal] -> high; once the high level has
// been reached they may only step back down high -> normal -> low.
tbb::task* execute() {
    // Publish this task's priority and grab the previously observed one.
    const tbb::priority_t previous = g_order.fetch_and_store(my_priority);
    if( previous != my_priority ) {
        REMARK("prev:%s --> new:%s\n", PriorityName(previous), PriorityName(my_priority));
        // TODO: improve the test for concurrent workers
        if( !g_order_established ) {
            // Initial transition path allowed: low -> [normal] -> high.
            if( my_priority == tbb::priority_high )
                g_order_established = true;
            else
                ASSERT(my_priority == tbb::priority_normal && previous == tbb::priority_low, NULL);
        } else {
            // After establishment only downward steps are legal: high -> normal -> low.
            switch( previous ) {
            case tbb::priority_high:
                ASSERT( my_priority == tbb::priority_normal, "previous priority is high - bad order");
                break;
            case tbb::priority_normal:
                ASSERT( my_priority == tbb::priority_low, "previous priority is normal - bad order");
                break;
            default:
                // Coming from low here is only legal during initialization.
                ASSERT(!g_order_established, "transition from low priority but not during initialization");
                break;
            }
        }
    }
    EmulateWork(0);
    return NULL;
}
void free_handles() { const size_t size = my_size.fetch_and_store( 0 ); for (size_t i=0; i<size; ++i) dynamic_unlink( my_handles[i] ); }
// A zero ticket always permits leaving. Otherwise the ticket is atomically
// consumed (replaced by -1) and leaving is granted only if it was positive,
// so at most one leaver can claim a given ticket.
/*override*/ bool on_scheduler_leaving() {
    return m_leave_ticket == 0 ? true : m_leave_ticket.fetch_and_store(-1) > 0;
}