Example #1
 /*override*/ tbb::task* execute() {
     if( my_depth>0 ) {
         int child_count = my_child_count;
         scheduler* my_sched = GetThreadSpecific();
         // allocate a continuation and re-register this task as its child,
         // so it can be recycled to run again at the next depth level
         tbb::task& c  = *new( tbb::task::allocate_continuation() ) tbb::empty_task;
         c.set_ref_count( child_count );
         recycle_as_child_of(c);
         --child_count;
         if( Producer==my_sched ) {
             // produce a task and put it into Exchanger
             tbb::task* t = new( c.allocate_child() ) tbb::empty_task;
             --child_count;
             t = Exchanger.fetch_and_store(t);
             if( t ) this->spawn(*t);
         } else {
             tbb::task* t = Exchanger.fetch_and_store(NULL);
             if( t ) this->spawn(*t);
         }
         while( child_count ) {
             c.spawn( *new( c.allocate_child() ) TaskGenerator(my_child_count, my_depth-1) );
             --child_count;
         }
         --my_depth;
         return this;
     } else {
         tbb::task* t = Exchanger.fetch_and_store(NULL);
         if( t ) this->spawn(*t);
         return NULL;
     }
 }
Example #2
 /*override*/ tbb::task* execute() {
     if( my_depth>0 ) {
         int child_count = my_child_count;
         scheduler* my_sched = internal::governor::local_scheduler();
         tbb::task& c  = *new( allocate_continuation() ) tbb::empty_task;
         c.set_ref_count( child_count );
         recycle_as_child_of(c);
         --child_count;
         if( Producer==my_sched ) {
             // produce a task and put it into Exchanger
             tbb::task* t = new( c.allocate_child() ) tbb::empty_task;
             --child_count;
             t = Exchanger.fetch_and_store(t);
             if( t ) spawn(*t);
         } else {
             tbb::task* t = Exchanger.fetch_and_store(NULL);
             if( t ) spawn(*t);
         }
         while( child_count ) {
             tbb::task* t = new( c.allocate_child() ) TaskGenerator(my_child_count, my_depth-1);
             if( my_depth > 4 ) enqueue(*t);  // FIFO: hand the task to the shared queue
             else               spawn(*t);    // LIFO: keep it on the local deque
             --child_count;
         }
         --my_depth;
         return this;
     } else {
         tbb::task* t = Exchanger.fetch_and_store(NULL);
         if( t ) spawn(*t);
         return NULL;
     }
 }
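Both execute() fragments above assume an enclosing TaskGenerator class that carries the per-level child count, the remaining depth, and the shared Exchanger/Producer globals. A purely hypothetical skeleton of that context, so the fragments read as complete code (only the names are taken from the fragments):

 #include "tbb/task.h"
 #include "tbb/atomic.h"

 class scheduler;  // TBB-internal type, as returned by internal::governor::local_scheduler()

 // Hypothetical context: Exchanger is a one-slot exchange between threads,
 // Producer marks the thread designated to feed it.
 tbb::atomic<tbb::task*> Exchanger;
 scheduler* Producer;

 class TaskGenerator : public tbb::task {
     int my_child_count;   // children spawned per level
     int my_depth;         // remaining recursion depth
 public:
     TaskGenerator( int child_count, int depth )
         : my_child_count(child_count), my_depth(depth) {}
     /*override*/ tbb::task* execute();  // body as in the fragments above
 };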
Example #3
	void wait(ogl_device& ogl)
	{	
		int delay = 0;
		if(!ogl.invoke([this]{return ready();}, high_priority))
		{
			while(!ogl.invoke([this]{return ready();}, normal_priority) && delay < 20)
			{
				delay += 2;
				Sleep(2);
			}
		}
		
		static tbb::atomic<size_t> count;
		static tbb::atomic<bool> warned;
		
		if(delay > 2 && ++count > 50)
		{
			if(!warned.fetch_and_store(true))
			{
				CASPAR_LOG(warning) << L"[fence] Performance warning. GPU was not ready during requested host read-back. Delayed by at least: " << delay << L" ms. Further warnings are sent to the trace log level."
									<< L" You can ignore this warning if you do not notice any problems with the video output. This warning is caused by insufficient support or performance of your graphics card for OpenGL-based memory transfers."
									<< L" Please try updating your graphics drivers or upgrading your graphics card; see the recommendations on (www.casparcg.com)."
									<< L" Further help is available at (www.casparcg.com/forum).";
			}
			else
				CASPAR_LOG(trace) << L"[fence] Performance warning. GPU was not ready during requested host read-back. Delayed by at least: " << delay << L" ms.";
		}
	}
Example #4
/*
 * Use atomic compare-and-swap to update val to
 * val + inc (*in log-space*).  Update occurs in a loop in case other
 * threads update in the meantime.
 */
inline void incLoopLog(tbb::atomic<double>& val, double inc) {
	double oldMass = val.load();
	double returnedMass = oldMass;
	double newMass{salmon::math::LOG_0};
	do {
		oldMass = returnedMass;
		newMass = salmon::math::logAdd(oldMass, inc);
		returnedMass = val.compare_and_swap(newMass, oldMass);
	} while (returnedMass != oldMass);
}
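For readability: salmon::math::logAdd above presumably computes log(exp(x) + exp(y)) in the standard numerically stable way. A sketch of that formulation, shown only as a plausible reading, not Salmon's actual source:

 #include <cmath>
 #include <utility>

 // Stable two-term log-sum-exp: factor out the larger argument so the
 // exponential never overflows; log1p keeps precision when the terms differ.
 inline double logAddSketch(double x, double y) {
     if (x < y) std::swap(x, y);              // ensure x >= y
     return x + std::log1p(std::exp(y - x));  // == log(exp(x) + exp(y))
 }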
Example #5
/*
 * Use atomic compare-and-swap to update val to
 * val + inc.  Update occurs in a loop in case other
 * threads update in the meantime.
 */
inline void incLoop(tbb::atomic<double>& val, double inc) {
        double oldMass = val.load();
        double returnedMass = oldMass;
        double newMass{oldMass + inc};
        do {
            oldMass = returnedMass;
            newMass = oldMass + inc;
            returnedMass = val.compare_and_swap(newMass, oldMass);
        } while (returnedMass != oldMass);
}
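The CAS-retry idiom in both helpers is safe to call from many threads at once: each iteration re-reads the current value and retries if another thread updated the bin in between. A hypothetical usage sketch, accumulating a vector into one atomic bin from a parallel loop (parallelSum and its includes are assumptions; incLoop is the helper above):

 #include <cstddef>
 #include <vector>
 #include "tbb/tbb.h"

 // Sum a vector into a single tbb::atomic<double> concurrently.
 double parallelSum(const std::vector<double>& xs) {
     tbb::atomic<double> total;
     total = 0.0;
     tbb::parallel_for(std::size_t(0), xs.size(), [&](std::size_t i) {
         incLoop(total, xs[i]);  // contended, but each update retries until it lands
     });
     return total;  // implicit atomic load
 }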
Example #6
    void operator () (const tbb::blocked_range<int> &range) const {
        for(int j=range.begin(); j<range.end(); j++) {
            //printf("Inner Loop Body (x,y)=(%d,%d) where y in [%d,%d)\n", i,j,range.begin(), range.end() );
            // let's handle one edge
            int currentEdge = vertices[currentLevelSet[i]]+j;
#ifdef XBOFILE
            int freshNode = edges[currentEdge][1]; // 0 RTM, value was prefetched
#else
            int freshNode = edges[currentEdge];    // 0 RTM, value was prefetched
#endif
            // test the gatekeeper: fetch_and_increment returns the previous value
            int oldGatek = gatekeeper[freshNode].fetch_and_increment();
            if (oldGatek == 0) { // destination vertex unvisited!
                // increment newLevelIndex atomically
                int myIndex = newLevelIndex.fetch_and_increment();
                // store the fresh node in the new level
                newLevelSet[myIndex] = freshNode;

                level[freshNode] = currentLevel + 1;
            } // end if freshNode
        } // end for j
    }
Example #7
 bool operator()( T &v ) {
    v = (T)my_count.fetch_and_increment();
    return (int)v < N;
 } 
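This call-operator shape (fill the output argument, report whether an item was produced) is what a tbb::flow::source_node body looks like. A hypothetical enclosing body class, assuming my_count and N are its members as the fragment suggests:

 // Emits 0..N-1 exactly once across all copies of the body (flow-graph
 // bodies are copied, so the counter is shared by reference), then reports
 // exhaustion. All names besides my_count and N are assumptions.
 struct counting_body {
     static const int N = 100;
     tbb::atomic<int>& my_count;
     explicit counting_body( tbb::atomic<int>& c ) : my_count(c) {}
     bool operator()( int &v ) {
         v = (int)my_count.fetch_and_increment();
         return (int)v < N;
     }
 };
 // Possible wiring: tbb::flow::graph g;
 //                  tbb::atomic<int> c; c = 0;
 //                  tbb::flow::source_node<int> src( g, counting_body(c), false );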
Example #8
    void operator () (const tbb::blocked_range<int> &range) const {
        int i; 
        PARTITIONER partitioner;
        for (i=range.begin(); i<range.end(); i++) {
            //printf("Outer Loop Body[%d<=%d<%d]=[0,%d]\n", range.begin(), i, range.end(), degrees[currentLevelSet[i]]);
#ifdef SEQUENTIAL_INNER
            for(int j=0; j<degrees[currentLevelSet[i]]; j++) {
                int oldGatek;
                int freshNode,currentEdge;

                currentEdge = vertices[currentLevelSet[i]]+j;
                // let's handle one edge
#ifdef XBOFILE
                freshNode = edges[currentEdge][1]; // 0 RTM, value was prefetched
#else
                freshNode = edges[currentEdge];    // 0 RTM, value was prefetched
#endif
                // test the gatekeeper: fetch_and_increment returns the previous value
                oldGatek = gatekeeper[freshNode].fetch_and_increment();
                if (oldGatek == 0) { // destination vertex unvisited!
                    // increment newLevelIndex atomically
                    int myIndex = newLevelIndex.fetch_and_increment();
                    // store fresh node in new level
                    newLevelSet[myIndex] = freshNode;
 
                    level[freshNode] = currentLevel + 1;
                } // end if freshNode
            }
#else
            tbb::parallel_for (tbb::blocked_range<int>(0,degrees[currentLevelSet[i]],INNER_GRAINSIZE), innerLoopBody(i), partitioner);
#endif
        }
    }
Example #9
	PathEntity(const PathEntity &entity)
	{
		id = entity.id;
		traversing.store(entity.traversing);
		pathNodes = entity.pathNodes;
		position = entity.position;
	}
Example #10
inline void logAddMass(tbb::atomic<double>& bin,
                       double newMass) {
      double oldVal = bin;
      double retVal = oldVal;
      double newVal = 0.0;
      do {
          oldVal = retVal;
          newVal = salmon::math::logAdd(oldVal, newMass);
          retVal = bin.compare_and_swap(newVal, oldVal);
      } while (retVal != oldVal);
}
Example #11
	virtual boost::unique_future<bool> send(const safe_ptr<read_frame>& frame) override
	{
		bool pushed = frame_buffer_.try_push(frame);

		if (pushed && !first_frame_reported_)
		{
			first_frame_promise_.set_value();
			first_frame_reported_ = true;
		}

		return caspar::wrap_as_future(is_running_.load());
	}
Example #12
        simplest_scheduler::simplest_scheduler( context_base &context, int numThreads, int htstride ) 
            : scheduler_i( context ),
              m_status(),
              m_rootTask(),
              m_initTBB( std::max( 2, numThreads + ( distributor::myPid() == 0 ? 0 : 1 ) ) ),
              m_taskGroupContext( tbb::task_group_context::isolated, tbb::task_group_context::default_traits | tbb::task_group_context::concurrent_wait )
        {
            //            {Speaker oss; oss << std::max( 2, numThreads + ( distributor::myPid() == 0 ? 0 : 1 ) );}
            if( htstride && s_have_pinning_observer.compare_and_swap( true, false ) == false ) {
                s_po = new pinning_observer( htstride );
            }
            m_status = COMPLETED;
            m_rootTask = new( tbb::task::allocate_root( m_taskGroupContext ) ) tbb::empty_task;
            m_rootTask->set_ref_count( 1 );
        }
Example #13
	virtual boost::unique_future<bool> send(const safe_ptr<core::read_frame>& frame) override
	{
		auto buffer = std::make_shared<audio_buffer_16>(
			core::audio_32_to_16(core::get_rearranged_and_mixed(frame->multichannel_view(), channel_layout_, channel_layout_.num_channels)));

		if (!input_.try_push(std::make_pair(frame, buffer)))
			graph_->set_tag("dropped-frame");

		if (Status() != Playing && !started_)
		{
			sf::SoundStream::Initialize(2, format_desc_.audio_sample_rate);
			Play();
			started_ = true;
		}

		return wrap_as_future(is_running_.load());
	}
Example #14
        simplest_prioritized_scheduler::simplest_prioritized_scheduler( context_base &context, int numThreads, int htstride )
            : scheduler_i( context ),
              m_runQueue(),
              m_status(),
              m_allocated(),
              m_initTBB( numThreads  + ( distributor::myPid() == 0 ? 0 : 1 ) ),
              m_applyStepInstance( NULL )
        {
            if( htstride && s_have_pinning_observer.compare_and_swap( true, false ) == false ) {
                s_po = new pinning_observer( htstride );
            }
            m_status = COMPLETED;
            m_allocated = false;
            if( m_allocated.compare_and_swap( true, false ) == false ) {
                m_applyStepInstance = new apply_step_instance();
            }

        }
Example #15
 tbb::task* execute() {
     tbb::priority_t prev = g_order.fetch_and_store(my_priority);
     if( my_priority != prev) {
         REMARK("prev:%s --> new:%s\n", PriorityName(prev), PriorityName(my_priority));
         // TODO: improve the test for concurrent workers
         if(!g_order_established) {
             // initial transition path allowed low->[normal]->high
             if(my_priority == tbb::priority_high)
                 g_order_established = true;
             else ASSERT(my_priority == tbb::priority_normal && prev == tbb::priority_low, NULL);
         } else { //transition path allowed high->normal->low
             if(prev == tbb::priority_high) ASSERT( my_priority == tbb::priority_normal, "previous priority is high - bad order");
             else if(prev == tbb::priority_normal) ASSERT( my_priority == tbb::priority_low, "previous priority is normal - bad order");
             else ASSERT(!g_order_established, "transition from low priority but not during initialization");
         }
     }
     EmulateWork(0);
     return NULL;
 }
Example #16
 void free_handles() {
     const size_t size = my_size.fetch_and_store( 0 );
     for (size_t i=0; i<size; ++i)
         dynamic_unlink( my_handles[i] );
 }
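The producer side that fills my_handles is not shown; presumably it claims a slot by bumping my_size atomically before publishing the handle. A hypothetical sketch under that assumption (handle_type and the capacity guarantee are inventions for illustration):

 // Publish a handle by claiming a slot index first; free_handles() later
 // takes the whole batch with fetch_and_store(0). Assumes my_handles has
 // enough capacity and no add races with free_handles().
 void add_handle( const handle_type& h ) {
     const size_t i = my_size.fetch_and_increment();
     my_handles[i] = h;
 }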
Example #17
 bool nextTaskID(std::size_t &taskID)
 {
   taskID = counter.fetch_and_increment();
   return taskID < NofAtom;
 }
Example #18
 bool nextTaskID(std::size_t &taskID, std::size_t increment)
 {
   taskID = counter.fetch_and_add(increment);
   return taskID < NofAtom;
 }
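A hypothetical worker loop built on these two helpers: each thread claims a block of IDs at a time and stops once the shared counter passes NofAtom (counter and NofAtom are assumed to be members of the same class; processAtom stands in for the real per-task work, and std::min needs <algorithm>):

 void runWorker(std::size_t chunk)
 {
   std::size_t first;
   // fetch_and_add hands this thread the half-open block [first, first+chunk)
   while (nextTaskID(first, chunk)) {
     const std::size_t last = std::min(first + chunk, NofAtom);
     for (std::size_t id = first; id < last; ++id)
       processAtom(id);  // hypothetical per-task work
   }
 }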
Example #19
    namespace Internal {
        
        static tbb::atomic< bool > s_have_pinning_observer;

        namespace {
            static pinning_observer * s_po = NULL;
            struct poi {
                ~poi() { delete s_po; }
            };
            static poi s_poi;
        }

        //        template< typename Step >
        class simplest_scheduler::TaskWrapper : public tbb::task
        {
        public: 
            tbb::task * execute()
            {
                CNC_ASSERT( m_schedulable );
                m_schedulable->scheduler().do_execute( m_schedulable );
                return NULL;
            }

            TaskWrapper( schedulable * s ) : 
                m_schedulable( s )
            {}

        private:
            schedulable * m_schedulable;
        };

        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        
        simplest_scheduler::simplest_scheduler( context_base &context, int numThreads, int htstride ) 
            : scheduler_i( context ),
              m_status(),
              m_rootTask(),
              m_initTBB( std::max( 2, numThreads + ( distributor::myPid() == 0 ? 0 : 1 ) ) ),
              m_taskGroupContext( tbb::task_group_context::isolated, tbb::task_group_context::default_traits | tbb::task_group_context::concurrent_wait )
        {
            //            {Speaker oss; oss << std::max( 2, numThreads + ( distributor::myPid() == 0 ? 0 : 1 ) );}
            if( htstride && s_have_pinning_observer.compare_and_swap( true, false ) == false ) {
                s_po = new pinning_observer( htstride );
            }
            m_status = COMPLETED;
            m_rootTask = new( tbb::task::allocate_root( m_taskGroupContext ) ) tbb::empty_task;
            m_rootTask->set_ref_count( 1 );
        }

        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

        simplest_scheduler::~simplest_scheduler()
        {
            m_rootTask->decrement_ref_count();
            m_rootTask->destroy( *m_rootTask );
        }

        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

        void simplest_scheduler::do_schedule( schedulable * stepInstance )
        { 
            m_status.compare_and_swap( RUNNING, COMPLETED );
            //            tbb_waiter * _waitTask = new( tbb::task::allocate_root() ) TaskWrapper( stepInstance );
            TaskWrapper * newTask = new( tbb::task::allocate_additional_child_of( *m_rootTask ) ) TaskWrapper( stepInstance );
            //tbb::task::enqueue( *newTask );
            tbb::task::spawn( *newTask );
        }

        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

        void simplest_scheduler::wait( const inflight_counter_type & /*steps_in_flight*/ )
        {
            m_rootTask->wait_for_all();
            m_status = COMPLETED;
        }
    } // namespace Internal
    namespace Internal {

        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

        static tbb::atomic< bool > s_have_pinning_observer;

        namespace {
            static pinning_observer * s_po = NULL;
            struct poi {
                ~poi() { delete s_po; }
            };
            static poi s_poi;
        }

        template<typename Body>
        class simplest_prioritized_scheduler::TaskWrapper2 : public tbb::task
        {
            const Body& my_body;
            typename Body::argument_type my_value;
            TaskWrapper2< Body > * toBeReturned;

        public: 
            task* execute() {
                my_body(my_value); 
                return toBeReturned;
            }

            void setNext(TaskWrapper2<Body>* givenNext) { toBeReturned = givenNext; }

            TaskWrapper2( const typename Body::argument_type& value, const Body& body ) : 
                my_body(body), my_value(value), toBeReturned(NULL){}
        };

        class simplest_prioritized_scheduler::apply_step_instance
        {
        public:
            void operator()( schedulable * stepInstance ) const
            {
                CNC_ASSERT( stepInstance );
                stepInstance->scheduler().do_execute( stepInstance );
            }

            typedef schedulable * argument_type;
        };

        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

        simplest_prioritized_scheduler::simplest_prioritized_scheduler( context_base &context, int numThreads, int htstride ) 
            : scheduler_i( context ),
              m_runQueue(),
              m_status(),
              m_allocated(),
              m_initTBB( numThreads  + ( distributor::myPid() == 0 ? 0 : 1 ) ),
              m_applyStepInstance( NULL )
        {
            if( htstride && s_have_pinning_observer.compare_and_swap( true, false ) == false ) {
                s_po = new pinning_observer( htstride );
            }
            m_status = COMPLETED;
            m_allocated = false;
            if( m_allocated.compare_and_swap( true, false ) == false ) {
                m_applyStepInstance = new apply_step_instance();
            }

        }

        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

        simplest_prioritized_scheduler::~simplest_prioritized_scheduler()
        {
            if( m_allocated.compare_and_swap( false, true ) == true ) {
                delete m_applyStepInstance;
            }
        }

        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

        // bool simplest_prioritized_scheduler::is_active()
        // {
        //     return m_status != COMPLETED;
        // }

        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

        void simplest_prioritized_scheduler::do_schedule( schedulable * stepInstance )
        { 
            m_status.compare_and_swap(RUNNING, COMPLETED);
            m_runQueue.push(stepInstance); //, stepInstance->priority());
        }

        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        // %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

        void simplest_prioritized_scheduler::wait( const inflight_counter_type & /*steps_in_flight*/ )
        {
            m_status = WAITING;
            // sagnak
            // The chain structure of tasks guarantees that this approach will not scale:
            // no stealing from a chain is possible, since there is no branching.
            // It compiles and runs almost as fast; the alternatives do not scale either,
            // so keeping this for the time being.
            while(!m_runQueue.empty())
            {
                tbb::task_group_context ctx( tbb::task_group_context::isolated, tbb::task_group_context::default_traits | tbb::task_group_context::concurrent_wait );
                tbb::empty_task * m_rootTask = new( tbb::task::allocate_root( ctx ) ) tbb::empty_task;
                m_rootTask->set_ref_count( 1 );

                schedulable* rootExec;
                m_runQueue.try_pop(rootExec);
                ApplyTask2& rootApply = *new( tbb::task::allocate_additional_child_of( *m_rootTask ) ) ApplyTask2( rootExec, *m_applyStepInstance );

                ApplyTask2* currPrev = &rootApply;
                schedulable* next;
                while(!m_runQueue.empty())
                {
                    m_runQueue.try_pop(next);
                    ApplyTask2* newNextTask = new(tbb::task::allocate_additional_child_of(*m_rootTask)) ApplyTask2(next,*m_applyStepInstance );
                    currPrev->setNext(newNextTask);
                    currPrev = newNextTask;
                }

                tbb::task::self().spawn(rootApply);
                m_rootTask->wait_for_all();
                m_rootTask->set_ref_count( 0 );
                m_rootTask->destroy( *m_rootTask );
            }

            m_status = COMPLETED;
        }
    } // namespace Internal
Example #20
 /*override*/
 bool on_scheduler_leaving() {
     if( m_leave_ticket == 0 ) return true;          // no ticket set: leaving is always allowed
     return m_leave_ticket.fetch_and_store(-1) > 0;  // consume the ticket; only a positive value lets this worker leave
 }