Example #1
static void TestThread2(TSArray<int> *ap)
{
	boost::this_thread::sleep(boost::posix_time::milliseconds(1000));
	cout << "Thread 2 attempting to lock element 0." << endl;
	ap->Lock(0); //Attempt to lock element 0; will block for 4 seconds.
	cout << "Thread 2 locked element 0." << endl;
	(*ap)[0] = 12;

	bar.wait(); //resynchronise

	cout << "Thread 2 locking element 3." << endl;
	ap->Lock(3);
	boost::this_thread::sleep(boost::posix_time::milliseconds(5000));
	cout << "Thread 2 releasing element 3." << endl;
	ap->Unlock(3);

	bar.wait(); //resynchronise

	//A more "real world" example in which two threads are inevitably going to interact

	for(unsigned i=6;i<1000;i++)
	{
		ap->Lock(i);
		(*ap)[i] = (*ap)[i-1] - 1;
		ap->Unlock(i);
	}
}
Example #2
void runner(std::size_t thread_index, boost::barrier& data_barrier, data_t& data) {
    for (std::size_t i = 0; i < 1000; ++ i) {
        fill_data(data.at(thread_index));
        data_barrier.wait();

        if (!thread_index) {
            compute_send_data(data);
        }
        data_barrier.wait();
    }
}
Example #3
 std::pair<std::vector<unsigned> , unsigned> evaluate(unsigned num_iterations)
 {
     thread_num_iterations = num_iterations;
     thread_score = std::vector<unsigned>(def_decks.size(), 0u);
     thread_total = 0;
     thread_compare = false;
     // unlock all the threads
     main_barrier.wait();
     // wait for the threads
     main_barrier.wait();
     return(std::make_pair(thread_score, thread_total));
 }
Example #4
 bool start() {
   bool throws = false;
   t_ = std::thread([&]() {
     server_->start([&]() { barrier_.wait(); },
                    [&](std::exception_ptr /*ex*/) {
                      throws = true;
                      server_ = nullptr;
                      barrier_.wait();
                    });
   });
   barrier_.wait();
   return !throws;
 }
Example #5
 std::pair<std::vector<unsigned> , unsigned> compare(unsigned num_iterations, double prev_score)
 {
     thread_num_iterations = num_iterations;
     thread_score = std::vector<unsigned>(def_decks.size(), 0u);
     thread_total = 0;
     thread_prev_score = prev_score;
     thread_compare = true;
     thread_compare_stop = false;
     // unlock all the threads
     main_barrier.wait();
     // wait for the threads
     main_barrier.wait();
     return(std::make_pair(thread_score, thread_total));
 }
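
The evaluate() and compare() members above drive a pool of worker threads through a two-rendezvous protocol on main_barrier: the first wait() releases the workers, the second blocks until they have finished (the ~Process() destructor further down uses the same first rendezvous to let them exit). The worker side is not among these snippets; the following is only a sketch of what it presumably looks like, with worker_loop(), run_iterations() and the exit check being assumptions derived from the globals used above.

 void worker_loop()
 {
     for (;;)
     {
         main_barrier.wait();      // released by evaluate()/compare()/~Process()
         if (destroy_threads)      // set in ~Process() before its single wait()
             return;
         run_iterations();         // hypothetical: accumulate into thread_score / thread_total
         main_barrier.wait();      // lets the caller's second wait() return
     }
 }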
Example #6
static void TestThread1(TSArray<int> *ap)
{
	cout << "Thread 1 locking element 0." << endl;
	ap->Lock(0);
	boost::this_thread::sleep(boost::posix_time::milliseconds(5000));
	(*ap)[0] = 10;
	cout << "Thread 1 releasing element 0." << endl;
	ap->Unlock(0);

	bar.wait(); //resynchronise

	boost::this_thread::sleep(boost::posix_time::milliseconds(1000));

	cout << "Thread 1 attempting to lock and set element 1." << endl;
	ap->Lock(1);
	(*ap)[1] = (*ap)[0] + 2;
	ap->Unlock(1);

	cout << "Thread 1 attempting to lock and set element 2." << endl;
	ap->Lock(2);
	(*ap)[2] = (*ap)[1] + 2;
	ap->Unlock(2);

	cout << "Thread 1 attempting to lock and set element 3." << endl;
	ap->Lock(3);
	(*ap)[3] = (*ap)[2] + 2;
	ap->Unlock(3);

	cout << "Thread 1 attempting to lock and set element 4." << endl;
	ap->Lock(4);
	(*ap)[4] = (*ap)[3] + 2;
	ap->Unlock(4);

	cout << "Thread 1 attempting to lock and set element 5." << endl;
	ap->Lock(5);
	(*ap)[5] = (*ap)[4] + 2;
	ap->Unlock(5);

	bar.wait(); //resynchronise

	//A more "real world" example in which two threads are inevitably going to interact

	for(unsigned i=6;i<1000;i++)
	{
		ap->Lock(i);
		(*ap)[i] = (*ap)[i-1] + 1;
		ap->Unlock(i);
	}
}
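
Example #1 and Example #6 are the two halves of the same test: Thread 1 fills elements 0..5 while Thread 2 contends for elements 0 and 3, and both then race up the rest of the array. The TSArray type, the global bar and the driver are not part of the snippets; below is a minimal driver sketch, assuming TSArray<int> has a size-taking constructor and bar is a boost::barrier shared by exactly these two threads.

#include <boost/thread/thread.hpp>
#include <boost/thread/barrier.hpp>

boost::barrier bar(2);                 // both test threads resynchronise here

int main()
{
    TSArray<int> a(1000);              // assumed constructor; elements 0..999 are used above
    boost::thread t1(TestThread1, &a);
    boost::thread t2(TestThread2, &a);
    t1.join();
    t2.join();
    return 0;
}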
Example #7
File: main.cpp  Project: YunSuk/AlloUnity
int main(int argc, char* argv[])
{
    if (argc < 2)
    {
        boost::filesystem::path exePath(argv[0]);
        std::cout << "usage: " << exePath.filename().string() << " <RTSP url of stream>" << std::endl;
        return -1;
    }
    
    boost::program_options::options_description desc("");
	desc.add_options()
		("no-display", "")
		("url", boost::program_options::value<std::string>(), "url")
		("interface", boost::program_options::value<std::string>(), "interface")
		("buffer-size", boost::program_options::value<unsigned long>(), "buffer-size");
    
    boost::program_options::positional_options_description p;
    p.add("url", -1);
    
    boost::program_options::variables_map vm;
    boost::program_options::store(boost::program_options::command_line_parser(argc, argv).
              options(desc).positional(p).run(), vm);
    boost::program_options::notify(vm);
    
    const char* interfaceAddress;
    if (vm.count("interface"))
    {
		interfaceAddress = vm["interface"].as<std::string>().c_str();
    }
    else
    {
		interfaceAddress = "0.0.0.0";
    }

	unsigned long bufferSize;
	if (vm.count("buffer-size"))
	{
		bufferSize = vm["buffer-size"].as<unsigned long>();
	}
	else
	{
		bufferSize = DEFAULT_SINK_BUFFER_SIZE;
	}

	std::cout << "Buffer size " << to_human_readable_byte_count(bufferSize, false, false) << std::endl;

	rtspClient = RTSPCubemapSourceClient::create(vm["url"].as<std::string>().c_str(), bufferSize, AV_PIX_FMT_RGBA, false, interfaceAddress);
    std::function<void (RTSPCubemapSourceClient*, CubemapSource*)> callback(boost::bind(&onDidConnect, _1, _2));
    rtspClient->setOnDidConnect(callback);
    rtspClient->connect();
    
    barrier.wait();
    
    Renderer renderer(cubemapSource);
	renderer.setOnDisplayedCubemapFace(boost::bind(&onDisplayedCubemapFace, _1, _2));
	renderer.setOnDisplayedFrame(boost::bind(&onDisplayedFrame, _1));
    renderer.start(); // Returns when window is closed
    
    CubemapSource::destroy(cubemapSource);
}
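
main() parks on a global barrier until the RTSP client reports a connection; neither the barrier nor cubemapSource is declared in the snippet. Since onDidConnect() (Example #25 below) is the only other caller of wait(), the globals are presumably along these lines (a sketch, not the project's actual declarations):

#include <boost/thread/barrier.hpp>

boost::barrier barrier(2);          // main() waits here, onDidConnect() releases it
CubemapSource* cubemapSource;       // set by onDidConnect(), read by main() after the wait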
Example #8
 void operator() ()
 {
   rtc_clock timer;
   unsigned char tmp[chunk_size];
   thread_sync->wait();
   timer.reset();
   timer.start();
   for(unsigned long i=0;i<BUFFER_SIZE;i+=chunk_size) {
     memcpy(tmp, buffer1 + i, chunk_size);
   }
   timer.stop();
   double mbps = ((double)BUFFER_SIZE)/timer.elapsed_time();
   BOOST_LOG_TRIVIAL(info) << "MEMREAD_SPEED "<< convert(mbps) << "MB/s";
   timer.reset();
   timer.start();
   for(unsigned long i=0;i<BUFFER_SIZE;i+=chunk_size) {
     memcpy(buffer1 + i, tmp, chunk_size);
   }
   timer.stop();
   mbps = ((double)BUFFER_SIZE)/timer.elapsed_time();
   BOOST_LOG_TRIVIAL(info) << "MEMWRITE_SPEED "<< convert(mbps) << "MB/s";
   timer.reset();
   timer.start();
   for(unsigned long i=0;i<BUFFER_SIZE;i+=chunk_size) {
     memcpy(buffer2 + i, buffer1 + i, chunk_size);
   }
   timer.stop();
   mbps = ((double)BUFFER_SIZE)/timer.elapsed_time();
   BOOST_LOG_TRIVIAL(info) << "MEMCPY_SPEED "<< convert(mbps) << "MB/s";
 }
Example #9
 ~Process()
 {
     destroy_threads = true;
     main_barrier.wait();
     for(auto thread: threads) { thread->join(); }
     for(auto data: threads_data) { delete(data); }
 }
Example #10
 void operator() ()
 {
   rtc_clock timer;
   unsigned char tmp[chunk_size];
   const unsigned long mask = buffer_items - 1;
   thread_sync->wait();
   timer.reset();
   timer.start();
   for(unsigned long i=0,j=0;i<buffer_items;i++) {
     memcpy(tmp, buffer1 + j*chunk_size, chunk_size);
     j=LCG_NEXT(j,mask);
   }
   timer.stop();
   double mbps = ((double)buffer_items*chunk_size)/timer.elapsed_time();
   BOOST_LOG_TRIVIAL(info) << "MEMREAD_SPEED "<< convert(mbps) << "MB/s";
   timer.reset();
   timer.start();
   for(unsigned long i=0,j=0;i<buffer_items;i++) {
     memcpy(buffer1 + j*chunk_size, tmp, chunk_size);
     j=LCG_NEXT(j,mask);
   }
   timer.stop();
   mbps = ((double)buffer_items*chunk_size)/timer.elapsed_time();
   BOOST_LOG_TRIVIAL(info) << "MEMWRITE_SPEED "<< convert(mbps) << "MB/s";
   timer.reset();
   timer.start();
   for(unsigned long i=0,j=0;i<buffer_items;i++) {
     memcpy(buffer2 + j*chunk_size, buffer1 + j*chunk_size, chunk_size);
     j=LCG_NEXT(j,mask);
   }
   timer.stop();
   mbps = ((double)buffer_items*chunk_size)/timer.elapsed_time();
   BOOST_LOG_TRIVIAL(info) << "MEMCPY_SPEED "<< convert(mbps) << "MB/s";
 }
Example #11
void stopStreaming()
{

    env->taskScheduler().triggerEvent(removeFaceSubstreamsTriggerId, NULL);
    env->taskScheduler().triggerEvent(removeBinularsSubstreamTriggerId, NULL);
    stopStreamingBarrier.wait();
    
    delete shm;
}
Example #12
void wait_fn( boost::barrier & b,
              boost::fibers::mutex & mtx,
              boost::fibers::condition_variable & cond,
              bool & flag) {
    b.wait();
	std::unique_lock< boost::fibers::mutex > lk( mtx);
	cond.wait( lk, [&flag](){ return flag; });
	++value;
}
Example #13
void invoke_n_workers(
    boost::barrier& b
    , double& elapsed
    , boost::uint64_t workers
)
{
    b.wait();

    invoke_n_workers_nowait(elapsed, workers);
}
Example #14
static void
doit(const std::vector<const FgVariant*> & /*sources*/,
     const std::vector<FgVariant*> & /*sinks*/)
{
        // First wait for both threads to reach so we can force
        // one exception to be thrown first. Note that if you get
        // a deadlock here, something is seriously screwed :-)
    m_barrier.wait();
    fgThrow("Thrown from multiple threads at the same time!");
}
Example #15
void notify_all_fn( boost::barrier & b,
                    boost::fibers::mutex & mtx,
                    boost::fibers::condition_variable & cond,
                    bool & flag) {
    b.wait();
	std::unique_lock< boost::fibers::mutex > lk( mtx);
    flag = true;
    lk.unlock();
	cond.notify_all();
}
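
wait_fn() and notify_all_fn() above are the two halves of a Boost.Fiber condition-variable test: one side blocks until flag becomes true, the other sets it and notifies. A minimal driver sketch follows; the global value, the barrier size and the choice to run each function in its own fiber on its own thread are assumptions, not taken from the original test.

#include <boost/thread/barrier.hpp>
#include <boost/fiber/all.hpp>
#include <functional>
#include <thread>

int value = 0;   // incremented by wait_fn() once it is notified

int main()
{
    boost::barrier b(2);
    boost::fibers::mutex mtx;
    boost::fibers::condition_variable cond;
    bool flag = false;

    std::thread t1([&] { boost::fibers::fiber(wait_fn, std::ref(b), std::ref(mtx), std::ref(cond), std::ref(flag)).join(); });
    std::thread t2([&] { boost::fibers::fiber(notify_all_fn, std::ref(b), std::ref(mtx), std::ref(cond), std::ref(flag)).join(); });
    t1.join();
    t2.join();
    return 0;
}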
Example #16
static void thread_func_TestPercentiles(boost::barrier& bar, MetricsFactory* factory, const char* name)
{
    Metric& metric = factory->get(name);
    bar.wait();
    for (int j=0; j < 25; j++) {
        double source[] = { 43, 54, 56, 61, 62, 66, 68, 69, 69, 70, 71, 72, 77, 78, 79, 85, 87, 88, 89, 93, 95, 96, 98, 99, 99 };
        for (size_t i=0; i < sizeof(source)/sizeof(*source); i++) {
            metric.update(source[i]);
        }
    }
}
Example #17
//! This function is executed in multiple threads
void thread_fun(boost::barrier& bar)
{
    // Wait until all threads are created
    bar.wait();

    // Now, do some logging
    for (unsigned int i = 0; i < LOG_RECORDS_TO_WRITE; ++i)
    {
        BOOST_LOG(test_lg::get()) << "Log record " << i;
    }
}
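
thread_fun() starts logging only after every participating thread has reached the barrier, so the LOG_RECORDS_TO_WRITE records from all threads interleave in the sink. A typical launcher sketch is shown below; THREAD_COUNT and run_logging_threads() are placeholders, as the real test fixture is not part of the snippet.

#include <boost/thread/thread.hpp>
#include <boost/thread/barrier.hpp>
#include <boost/bind.hpp>

const unsigned THREAD_COUNT = 4;          // assumed; pick the desired concurrency

void run_logging_threads()
{
    boost::barrier bar(THREAD_COUNT);
    boost::thread_group threads;
    for (unsigned i = 0; i < THREAD_COUNT; ++i)
        threads.create_thread(boost::bind(&thread_fun, boost::ref(bar)));
    threads.join_all();                   // every thread writes LOG_RECORDS_TO_WRITE records
}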
Example #18
void test(unsigned int record_count, boost::barrier& bar)
{
    BOOST_LOG_SCOPED_THREAD_TAG("ThreadID", boost::this_thread::get_id());
    bar.wait();

    src::severity_logger< severity_level > slg;
    for (unsigned int i = 0; i < record_count; ++i)
    {
        BOOST_LOG_SEV(slg, warning) << "Test record";
    }
}
Example #19
static void thread_func_TestHistogram(boost::barrier& bar, MetricsFactory* factory, const char* name)
{
    Metric& metric = factory->get(name);
    bar.wait();
    for (int j=0; j < 100; j++) {
        metric.update(0);
        metric.update(1);
        metric.update(10);
        metric.update(30);
        metric.update(31);
    }
}
Example #20
void threadFunc(FreeList& pool, ros::atomic<bool>& done, ros::atomic<bool>& failed, boost::barrier& b)
{
  b.wait();

  //ROS_INFO_STREAM("Thread " << boost::this_thread::get_id() << " starting");

  uint32_t* vals[10];
  uint64_t alloc_count = 0;
  while (!done.load())
  {
    for (uint32_t i = 0; i < 10; ++i)
    {
      vals[i] = static_cast<uint32_t*>(pool.allocate());

      if (vals[i])
      {
        ++alloc_count;
        *vals[i] = i;
      }
      else
      {
        ROS_ERROR_STREAM("Thread " << boost::this_thread::get_id() << " failed to allocate");
      }
    }

    for (uint32_t i = 0; i < 10; ++i)
    {
      if (vals[i])
      {
        if (*vals[i] != i)
        {
          ROS_ERROR_STREAM("Thread " << boost::this_thread::get_id() << " val " << vals[i] << " " << i << " = " << *vals[i]);
          failed.store(true);
        }

        pool.free(vals[i]);
      }
    }

    if (failed.load())
    {
#if FREE_LIST_DEBUG
      boost::mutex::scoped_lock lock(g_debug_mutex);
      g_debug.push_back(*pool.getDebug());
#endif
      return;
    }
  }

  //ROS_INFO_STREAM("Thread " << boost::this_thread::get_id() << " allocated " << alloc_count << " blocks");
}
Example #21
//! This function is executed in multiple threads
void thread_fun(boost::barrier& bar)
{
    // Wait until all threads are created
    bar.wait();

    // Here we go. First, identify the thread.
    BOOST_LOG_SCOPED_THREAD_TAG("ThreadID", boost::this_thread::get_id());

    // Now, do some logging
    for (unsigned int i = 0; i < LOG_RECORDS_TO_WRITE; ++i)
    {
        BOOST_LOG(test_lg::get()) << "Log record " << i;
    }
}
Example #22
inline void worker(
    boost::barrier& b
  , boost::uint64_t updates
    )
{
    b.wait();

    for (double i = 0.; i < updates; ++i)
    {
        global_scratch.reset(new double);

        *global_scratch += 1. / (2. * i * (*global_scratch) + 1.);

        global_scratch.reset();
    }
}
Example #23
inline void worker(
    boost::barrier& b
  , std::uint64_t updates
    )
{
    b.wait();

    for (double i = 0.; i < updates; ++i)
    {
        global_scratch = new double;

        *global_scratch += 1. / (2. * i * (*global_scratch) + 1.);

        delete global_scratch;
    }
}
Example #24
    void operator() () {
        timestamp::buf_type buf[2];

        if (barrier != NULL)
            barrier->wait();

        for (int i=0; i < iterations; i++) {
            hr.start_incr();
            int n = timestamp::update_and_write(
                DATE_TIME_WITH_USEC, buf[0], sizeof(buf[0]));
            hr.stop_incr();
            time_val t1 = now_utc();
            if (n != 24 || strlen(buf[0]) != 24) {
                std::cerr << "Wrong buffer length: " << buf[0] << std::endl;
                BOOST_REQUIRE_EQUAL(24, n);
            }
            hr.start_incr();
            timestamp::update_and_write(
                DATE_TIME_WITH_USEC, buf[1], sizeof(buf[1]));
            hr.stop_incr();
            time_val t2 = now_utc();
            if (strcmp(buf[0], buf[1]) > 0) {
                std::cerr << "Backward time jump detected: "
                    << buf[0] << ' ' << buf[1]
                    << '(' << t1.sec() << ' ' << t2.sec() << ')' << std::endl;

                BOOST_REQUIRE(strcmp(buf[1], buf[0]) >= 0);
            }
        }

        const time_val& tv = hr.elapsed_time();
        timestamp::format(TIME_WITH_USEC, tv, buf[0], sizeof(buf[0]));

        {
            std::stringstream s; s.precision(6);
            s << "Thread" << id << " timestamp: "
              #ifdef DEBUG_TIMESTAMP
              << "hrcalls=" << timestamp::hrcalls()
              << ", syscalls=" << timestamp::syscalls()
              #endif
              << ", speed=" << std::fixed << ((double)iterations / tv.seconds())
              << ", latency=" << (1000000000.0*tv.seconds()/iterations) << " ns";
            BOOST_TEST_MESSAGE(s.str());
        }
    }
Example #25
File: main.cpp  Project: YunSuk/AlloUnity
void onDidConnect(RTSPCubemapSourceClient* client, CubemapSource* cubemapSource)
{
    if (typeid(cubemapSource) == typeid(H264CubemapSource))
    {
        H264CubemapSource* h264CubemapSource = (H264CubemapSource*)cubemapSource;
        
        h264CubemapSource->setOnReceivedNALU       (boost::bind(&onReceivedNALU,        _1, _2, _3, _4));
        h264CubemapSource->setOnReceivedFrame      (boost::bind(&onReceivedFrame,       _1, _2, _3, _4));
        h264CubemapSource->setOnDecodedFrame       (boost::bind(&onDecodedFrame,        _1, _2, _3, _4));
        h264CubemapSource->setOnColorConvertedFrame(boost::bind(&onColorConvertedFrame, _1, _2, _3, _4));
    }
    
    stats.autoSummary(boost::chrono::seconds(10),
					  AlloReceiver::statValsMaker,
					  AlloReceiver::postProcessorMaker,
					  AlloReceiver::formatStringMaker);
    
    ::cubemapSource = cubemapSource;
    
    barrier.wait();
}
Example #26
void store_lines_from_robotstxt_and_output_them(const std::string &host)
{
	std::string robotstxt = get_robotstxt(host);
	std::istringstream is(robotstxt);
	std::string s;
	while (std::getline(is, s))
	{
		auto p = lines.synchronize();
		p->emplace_back(s);
	}

	// wait() blocks until the required number of threads has called wait();
	// wait() returns true for only one thread
	if (barrier.wait())
	{
		// value() provides unsynchronized access (no synchronization required
		// as only one thread iterates over and outputs the lines)
		for (auto &line : lines.value())
			std::cout << line << '\n';
	}
}
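
The robots.txt example relies on two globals that are not shown. Judging from the calls it makes (lines.synchronize(), lines.value(), barrier.wait()), they are presumably declared roughly as follows; the thread count of 3 is an arbitrary assumption.

#include <boost/thread/barrier.hpp>
#include <boost/thread/synchronized_value.hpp>
#include <string>
#include <vector>

boost::synchronized_value<std::vector<std::string>> lines;  // synchronize() locks, value() does not
boost::barrier barrier(3);                                   // one count per downloading thread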
Example #27
File: thread.hpp  Project: fmrl/blanchett
	void operator()()
	{
		// [mlr] first, i signal that i'm ready and wait until all of my
		// siblings are ready.
		std::cout << "thread \"" << my_name << "\" waiting for barrier."
			<< std::endl;

		my_start_barrier->wait();
		// [mlr] i should not need the barrier again.
		my_start_barrier = NULL;

		std::cout << "thread \"" << my_name << "\" beginning service loop."
			<< std::endl;

		// [mlr] now, i run the thread's main loop until it's time to exit.
		while (my_service.service())
			continue;

		std::cout << "thread \"" << my_name << "\" finished."
			<< std::endl;
	}
Example #28
void f(
	boost::barrier & tb,
	boost::fibers::spin::barrier & fb)
{
	std::stringstream tss;
	tss << boost::this_thread::get_id();

	fprintf( stderr, "start thread %s\n", tss.str().c_str() );

	boost::fibers::scheduler<> sched;
	
	sched.make_fiber( & fn1, boost::ref( fb), boost::fiber::default_stacksize);

	tb.wait();

	for (;;)
	{
		while ( sched.run() );
		if ( sched.empty() ) break;
	}

	fprintf( stderr, "end thread %s\n", tss.str().c_str() );
}
Example #29
void FiberDriverServer::worker(boost::barrier& b)
{
    b.wait();
    LOG(INFO) << "thread running.";
    for(;;)
    {
        try
        {
            parent_type::run();
            // normal exit
            if (normal_stop_)
                break;
            else
            {
                LOG(ERROR) << "driver stopped by accident.";
            }
        }
        catch (std::exception& e)
        {
            LOG(ERROR) << "[DriverServer] "
                << e.what() << std::endl;
        }
    }
}
Example #30
/// thread execution function
static void thread_function(boost::barrier& b)
{
    b.wait(); /// wait until the barrier releases all waiting threads
    boost::mutex::scoped_lock lock(m); /// lock mutex
    BOOST_CHECK_EQUAL(1,0); /// produce the fault
}