Example #1
 freelist_tester(void):
     free_nodes(0), fl(max_nodes * thread_count)
 {
     for (int i = 0; i != thread_count; ++i)
         threads.create_thread(boost::bind(&freelist_tester::run, this));
     threads.join_all();
 }
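The pattern above recurs in most of the examples below: create a fixed number of threads on a boost::thread_group and block on join_all() until every worker returns. A minimal self-contained sketch of just that pattern (worker() and thread_count here are placeholders, not part of the original snippet):

#include <boost/thread.hpp>
#include <boost/bind.hpp>
#include <iostream>

void worker(int id)
{
    std::cout << "worker " << id << " finished\n";
}

int main()
{
    const int thread_count = 4;
    boost::thread_group threads;
    for (int i = 0; i != thread_count; ++i)
        threads.create_thread(boost::bind(&worker, i));
    threads.join_all();   // blocks until every worker has returned
    return 0;
}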
Example #2
 void serverMake(int port, std::string address) {
     server serv(*io_service_, port, address);
     for (int i = 0; i < 5; i++) {
         threads->create_thread(boost::bind(getstart, io_service_));
     }
     threads->join_all();
 }
Example #3
    ~TestingSetup()
    {
        threadGroup.interrupt_all();
        threadGroup.join_all();
        Bitcredit_UnregisterNodeSignals(Credits_NetParams()->GetNodeSignals());
#ifdef ENABLE_WALLET
        delete bitcoin_pwalletMain;
        bitcoin_pwalletMain = NULL;
        delete bitcredit_pwalletMain;
        bitcredit_pwalletMain = NULL;
        delete deposit_pwalletMain;
        deposit_pwalletMain = NULL;
#endif
        delete credits_pcoinsTip;
        delete bitcredit_pcoinsdbview;
        delete bitcredit_pblocktree;

        delete bitcoin_pcoinsTip;
        delete bitcoin_pcoinsdbview;
        delete bitcoin_pblocktree;
#ifdef ENABLE_WALLET
        bitcoin_bitdb.Flush(true);
        bitcredit_bitdb.Flush(true);
        deposit_bitdb.Flush(true);
#endif
        boost::filesystem::remove_all(pathTemp);
    }
Example #4
 virtual ~Log()
 {
     _isStopping = true;
     _threadPool.interrupt_all();
     _threadPool.join_all();
     _stringLoggerThread.interrupt();
     _stringLoggerThread.join();
 }
Example #5
// Signals that the program should finish.
void deinit()
{
	programRunning = FALSE;
#if _USE_MT
	threads.join_all();
#endif

	_aligned_free(spherePositions);
	_aligned_free(sphereData);
}
Example #6
 ~TestingSetup()
 {
     threadGroup.interrupt_all();
     threadGroup.join_all();
     delete pwalletMain;
     pwalletMain = NULL;
     delete pcoinsTip;
     delete pcoinsdbview;
     delete pblocktree;
     bitdb.Flush(true);
     boost::filesystem::remove_all(pathTemp);
 }
Example #7
    /// @brief Destructor.
    ~ThreadPool() {
        // Set running flag to false then notify all threads.
        {
            boost::unique_lock< boost::mutex > lock(mutex_);
            running_ = false;
            condition_.notify_all();
        }

        try {
            threads_.join_all();
        }
        // Suppress all exceptions.
        catch (const std::exception&) {}
    }
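A minimal sketch of the same shutdown idiom, assuming a hypothetical Pool class (names and the worker body are illustrative, not the original's): workers block on a condition variable, the destructor flips the running flag under the lock, wakes everyone, and then joins the group.

#include <boost/thread.hpp>
#include <boost/bind.hpp>

class Pool {
public:
    Pool() : running_(true) {
        for (int i = 0; i < 4; ++i)
            threads_.create_thread(boost::bind(&Pool::work, this));
    }

    ~Pool() {
        {
            boost::unique_lock<boost::mutex> lock(mutex_);
            running_ = false;
            condition_.notify_all();       // wake every waiting worker
        }
        try {
            threads_.join_all();           // wait for them to unwind
        } catch (const std::exception&) {} // suppress, as in the example above
    }

private:
    void work() {
        boost::unique_lock<boost::mutex> lock(mutex_);
        while (running_)
            condition_.wait(lock);         // lock is released while waiting
    }

    bool running_;
    boost::mutex mutex_;
    boost::condition_variable condition_;
    boost::thread_group threads_;
};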
Example #8
	void stop()
	{
		std::cout << "main pool thread sending quit job." << std::endl;

		job::smart_ptr q(new quit());
		const typename threads_type::iterator i_end = my_threads.end();
		for (typename threads_type::iterator i = my_threads.begin(); i != i_end; ++i)
			(*i)->push(q);
		std::cout << "main pool thread waiting for children to terminate." << std::endl;
		my_group.join_all();
		static nyan::deletes< thread_type > f;
		std::for_each(my_threads.begin(), my_threads.end(), f);
	}
Example #9
void client::run_test()
{
	unsigned optimal_threads_count = boost::thread::hardware_concurrency();

	if (optimal_threads_count == 0)
		optimal_threads_count = 1;

	ostream_ << "Create " << optimal_threads_count << " threads for client" << std::endl;

	for (unsigned i = 0; i < optimal_threads_count; ++i)
		threads_.add_thread(new boost::thread([this, i] () { thread_func(i); }));

	threads_.join_all();
	ostream_ << "All client's threads done" << std::endl;
}
Example #10
void AsyncSpinnerImpl::stop()
{
  boost::mutex::scoped_lock lock(mutex_);
  if (!continue_)
    return;

  ROS_ASSERT_MSG(member_spinlock.owns_lock(), 
                 "Async spinner's member lock doesn't own the global spinlock, hrm.");
  ROS_ASSERT_MSG(member_spinlock.mutex() == &spinmutex, 
                 "Async spinner's member lock owns a lock on the wrong mutex?!?!?");
  member_spinlock.unlock();

  continue_ = false;
  threads_.join_all();
}
Example #11
    ~TestingSetup()
    {
        threadGroup.interrupt_all();
        threadGroup.join_all();
        UnregisterNodeSignals(GetNodeSignals());
#ifdef ENABLE_WALLET
        delete pwalletMain;
        pwalletMain = NULL;
#endif
        delete pblocktree;
        delete pviewTip;
#ifdef ENABLE_WALLET
        bitdb.Flush(true);
#endif
        boost::filesystem::remove_all(pathTemp);
    }
Example #12
   ~thread_pool()
   {
      DLOG(INFO) << "stopping service...";
      // Force all threads to return from io_service::run().
      io_service_.stop();

      DLOG(INFO) << "joining threads...";
      try
      {
         threads_.join_all();
      }
      catch (...)
      {
         LOG_EXCEPTION();
      }
      DLOG(INFO) << __func__;
   }
Example #13
int main(int argc, char** argv) {
    init();
    if (handle_args(argc, argv) == 1)
        return 0;

    int processors = boost::thread::hardware_concurrency();

    ioService.post(boost::bind(read_images));
    ioService.post(boost::bind(assign_workers));
    ioService.post(boost::bind(output));

    boost::asio::io_service::work work(ioService);
    for (int i = 0; i < processors; i++) {
        threadpool.create_thread(boost::bind(&boost::asio::io_service::run, &ioService));
    }
    threadpool.join_all();
    return 0;
}
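The io_service-based pool above relies on the work object to keep run() from returning; while it is alive, join_all() only returns after something calls ioService.stop() (presumably one of the posted handlers in the original). A self-contained variant of the same idiom (using lambdas instead of boost::bind, with task() as a placeholder) that releases the work guard so the pool drains and exits on its own:

#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <iostream>
#include <memory>

void task(int id) { std::cout << "task " << id << " done\n"; }

int main()
{
    boost::asio::io_service ioService;
    boost::thread_group threadpool;

    // Keeps run() from returning while tasks are still being posted.
    std::unique_ptr<boost::asio::io_service::work> work(
        new boost::asio::io_service::work(ioService));

    unsigned workers = boost::thread::hardware_concurrency();
    if (workers == 0)
        workers = 2;
    for (unsigned i = 0; i < workers; ++i)
        threadpool.create_thread([&ioService]() { ioService.run(); });

    for (int i = 0; i < 8; ++i)
        ioService.post([i]() { task(i); });

    work.reset();            // let run() return once the queue is empty
    threadpool.join_all();
    return 0;
}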
Example #14
int main()
{
  {
    boost::thread_group threads;
    for (int i = 0; i < 3; ++i)
        threads.create_thread(&increment_count);
    threads.join_all();
  }
#if defined BOOST_THREAD_PROVIDES_INTERRUPTIONS
  {
    boost::thread_group threads;
    for (int i = 0; i < 3; ++i)
        threads.create_thread(&increment_count);
    threads.interrupt_all();
    threads.join_all();
  }
#endif
  {
    boost::thread_group threads;
    boost::thread* th = new boost::thread(&increment_count);
    threads.add_thread(th);
    BOOST_TEST(! threads.is_this_thread_in());
    threads.join_all();
  }
  {
    boost::thread_group threads;
    boost::thread* th = new boost::thread(&increment_count);
    threads.add_thread(th);
    BOOST_TEST(threads.is_thread_in(th));
    threads.remove_thread(th);
    BOOST_TEST(! threads.is_thread_in(th));
    th->join();
  }
  {
    {
      boost::unique_lock<boost::mutex> lock(mutex);
      boost::thread* th2 = new boost::thread(&increment_count_2);
      threads2.add_thread(th2);
    }
    threads2.join_all();
  }
  return boost::report_errors();
}
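Several of the destructors above pair interrupt_all() with join_all(). A small self-contained sketch of why that terminates promptly, assuming interruptions are enabled (the default) and using a placeholder loop_forever(): sleep_for() is an interruption point, so each interrupted thread unwinds with boost::thread_interrupted and join_all() returns.

#include <boost/thread.hpp>
#include <boost/chrono.hpp>
#include <iostream>

void loop_forever()
{
    try {
        for (;;)
            boost::this_thread::sleep_for(boost::chrono::seconds(1));
    } catch (const boost::thread_interrupted&) {
        std::cout << "interrupted, exiting\n";   // thread unwinds cleanly
    }
}

int main()
{
    boost::thread_group threads;
    for (int i = 0; i < 3; ++i)
        threads.create_thread(&loop_forever);
    threads.interrupt_all();   // request interruption at the next interruption point
    threads.join_all();        // returns as soon as the loops exit
    return 0;
}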
Example #15
int main()
{
    std::ifstream input(BINARY_DIR"/input.txt", std::ios::binary);
    SyncClass sc;
    for(int i = 0;i < THREADS_COUNT; i++)
        threads.create_thread(boost::bind(&SyncClass::writeMessage, &sc) );

    while(!input.eof()){
        Message msg(input);
        if(input.eof())
            break;
        boost::mutex::scoped_lock lock(queueMutex);
        msgQueue.push(msg);
    }
    sc.stopWorking();
    threads.join_all();

    return 0;
}
Example #16
int main(int argc, char **argv)
{
	int i = 0;
	boost::thread_group reads;
	
	//std::vector<FFMPEGData> vidData;
	//FFMPEGData* vidData = new FFMPEGData[nodeNumber];

	av_register_all();
	avformat_network_init();
	// Create a struct entry for every node.
	//vidData.push_back(FFMPEGData());


	for (i = 0; i < nodeNumber; i++)
	{
		// Set the sdp file for each node.
		sprintf_s(vidData[i].filepath, "test%d.sdp", (i + 1));

		// Print the file path for debugging.
		printf("Filepath = %s\n", vidData[i].filepath);

		vidData[i].restart = 0;
		vidData[i].windowName = i;

		reads.add_thread(new boost::thread(thread, boost::ref(vidData[i])));
		// plays.add_thread(new boost::thread(play, vidData[i]));
		//boost::scoped_thread<> t{ boost::thread{ thread } };
		//boost::scoped_thread<> f{ boost::thread{ play } };

	}
	reads.join_all();
	plays.join_all();
	//If the code ever reaches here close the pipe before finishing.
	delete[] vidData;
	return 0;
}
Example #17
int main(int argc, char** argv)
{
	int args = 1;
#ifdef USE_OPENCL
	if (argc < 10) {
#else
	if (argc < 7) {
#endif
		std::cout << "Not enough arguments" << std::endl;
		system("pause");
		return 1;
	}

	DIM = util::toInt(argv[args++]);
	N = util::toInt(argv[args++]);
	K = util::toInt(argv[args++]);
	ITERATIONS = util::toInt(argv[args++]);
	RUNS = util::toInt(argv[args++]);
#ifdef USE_OPENCL
	AM_LWS = util::toInt(argv[args++]);
	RP_LWS = util::toInt(argv[args++]);
	CT_LWS = util::toInt(argv[args++]);

	USE_ALL_DEVICES = util::toInt(argv[args++]);
#else
	device_count = util::toInt(argv[args++]);
#endif


	std::cout << "DIM = " << DIM << std::endl;
	std::cout << "N = " << N << std::endl;
	std::cout << "K = " << K << std::endl;
	std::cout << "ITERATIONS = " << ITERATIONS << std::endl;
	std::cout << "RUNS = " << RUNS << std::endl;
#ifdef USE_OPENCL
	std::cout << "AM_LWS = " << AM_LWS << std::endl;
	std::cout << "RP_LWS = " << RP_LWS << std::endl;
	std::cout << "CT_LWS = " << CT_LWS << std::endl;
	std::cout << "USE_ALL_DEVICES = " << USE_ALL_DEVICES << std::endl << std::endl;
#else
	std::cout << "device_count = " << device_count << std::endl << std::endl;
#endif


#ifdef _WIN32
	rng.seed();
	srand(GetTickCount());
#else
	rng.seed();
	srand(getTimeMs());
#endif

	u = boost::uniform_real<float>(0.0f, 1000000.0f);
	gen = new boost::variate_generator<boost::mt19937&, boost::uniform_real<float> >(rng, u);

#ifdef USE_OPENCL
	cl_int clError = CL_SUCCESS;
	initCL();

	for (int i = 0; i < clDevices.size(); ++i) {

		clInputBuf.push_back(cl::Buffer(clContext, CL_MEM_READ_ONLY, N * DIM * sizeof(float), NULL, &clError));
		if (clError != CL_SUCCESS) std::cout << "OpenCL Error: Could not create buffer" << std::endl;

		clCentroidBuf.push_back(cl::Buffer(clContext, CL_MEM_READ_WRITE, K * DIM * sizeof(float), NULL, &clError));
		if (clError != CL_SUCCESS) std::cout << "OpenCL Error: Could not create buffer" << std::endl;

		clMappingBuf.push_back(cl::Buffer(clContext, CL_MEM_READ_WRITE, N * sizeof(int), NULL, &clError));
		if (clError != CL_SUCCESS) std::cout << "OpenCL Error: Could not create buffer" << std::endl;

		clReductionBuf.push_back(cl::Buffer(clContext, CL_MEM_WRITE_ONLY, N * sizeof(float), NULL, &clError));
		if (clError != CL_SUCCESS) std::cout << "OpenCL Error: Could not create buffer" << std::endl;

		clClusterAssignment[i].setArgs(clInputBuf[i](), clCentroidBuf[i](), clMappingBuf[i]());
		clClusterReposition[i].setArgs(clInputBuf[i](), clMappingBuf[i](), clCentroidBuf[i]());
		clClusterReposition_k[i].setArgs(clInputBuf[i](), clMappingBuf[i](), clCentroidBuf[i]());
		//clClusterReposition_k_c[i].setArgs(clInputBuf[i](), clMappingBuf[i](), clCentroidBuf[i](), clConvergedBuf[i]());
		clComputeCost[i].setArgs(clInputBuf[i](), clCentroidBuf[i](), clMappingBuf[i](), clReductionBuf[i]());

	}

	device_count = clDevices.size();
#endif

	util::Clock clock;
	clock.reset();

	for (int i = 0; i < RUNS; ++i) {
		mapping_list.push_back(NULL);
		centroids_list.push_back(NULL);
		cost_list.push_back(0.0f);
	}

	float* source = new float[N*DIM];
	for (int i = 0; i < N*DIM; ++i)
		source[i] = gen_random_float();

	input_list.push_back(source);

	for (int i = 1; i < device_count; ++i) {
		float* copy = new float[N*DIM];
		memcpy(copy, source, N*DIM*sizeof(float));
		input_list.push_back(copy);
	}

	if (device_count > 1) {
		boost::thread_group threads;

		for (int i = 0; i < device_count; ++i) {
			threads.create_thread(boost::bind(exec, i, true));
		}

		threads.join_all();
	} else {
		exec(0, false);
	}

#ifdef USE_OPENCL
	reduction_group.join_all();
#endif

	int best_result = 0;
	float best_cost = std::numeric_limits<float>::max();
	for (int i = 0; i < RUNS; ++i) {
		if (cost_list[i] < best_cost) {
			best_cost = cost_list[i];
			best_result = i;
		}
	}

	FILE *out_fdesc = fopen("centroids.out", "wb");
	fwrite((void*)centroids_list[best_result], K * DIM * sizeof(float), 1, out_fdesc);
	fclose(out_fdesc);

	out_fdesc = fopen("mapping.out", "wb");
	fwrite((void*)mapping_list[best_result], N * sizeof(int), 1, out_fdesc);
	fclose(out_fdesc);

	std::cout << "Best result is " << best_result << std::endl;

	for (int i = 0; i < device_count; ++i) {
		delete[] input_list[i];
	}

	for (int i = 0; i < RUNS; ++i) {
		delete[] mapping_list[i];
		delete[] centroids_list[i];
	}

	float now = clock.get();
	std::cout << "Total: " << now << std::endl;

	system("pause");

	return 0;
}
Example #18
 void run()
 {
     start();
     m_threads.join_all();
 }
Example #19
            ~bench_atomic() {
                threads.join_all();
                std::cerr << "bench_atomic: x = " << x << ", elapsed time is " << timer.elapsed()
                          << "\n";
            }
Example #20
            ~bench_no_lock() {
                threads.join_all();
                std::cerr << "bench_no_lock: x = " << x << ", elapsed time is " << timer.elapsed()
                          << "\n";
            }
Example #21
            ~bench_boost_mutex() {
                threads.join_all();
                std::cerr << "bench_boost_mutex: x = " << x << ", elapsed time is " << timer.elapsed()
                          << "\n";
            }
Example #22
 void wait()
 {
     // wait for them
     group_.join_all();
     BOOST_CHECK(my_aspect_map_.size() == 0);
 }
Example #23
void M6Processor::Process(vector<fs::path>& inFiles, M6Progress& inProgress,
    uint32 inNrOfThreads)
{
    if (inFiles.size() >= inNrOfThreads)
        mUseDocQueue = false;
    else
    {
        mUseDocQueue = true;
        for (uint32 i = 0; i < inNrOfThreads; ++i)
            mDocThreads.create_thread([this]() { this->ProcessDocument(); });
    }

    if (inFiles.size() == 1)
    {
        M6DataSource data(inFiles.front(), inProgress);
        for (M6DataSource::iterator i = data.begin(); i != data.end(); ++i)
        {
            LOG(INFO, "M6Processor: processing file %s", i->mFilename.c_str());
            ProcessFile(i->mFilename, i->mStream);
            LOG(INFO, "M6Processor: done processing file %s", i->mFilename.c_str());
        }
    }
    else
    {
        if (inNrOfThreads > inFiles.size())
            inNrOfThreads = static_cast<uint32>(inFiles.size());

        for (uint32 i = 0; i < inNrOfThreads; ++i)
            mFileThreads.create_thread(
                [&inProgress, this]() { this->ProcessFile(inProgress); }
            );

        for (fs::path& file : inFiles)
        {
            if (not (mException == std::exception_ptr()))
                rethrow_exception(mException);

            if (not fs::exists(file))
            {
                cerr << "file missing: " << file << endl;
                continue;
            }

            mFileQueue.Put(file);
        }

        mFileQueue.Put(fs::path());

        // Now all the input files have been added to the queue.

        mFileThreads.join_all();
    }

    if (mUseDocQueue)
    {
        mDocQueue.Put(kSentinel);
        mDocThreads.join_all();
    }

    if (not (mException == std::exception_ptr()))
        rethrow_exception(mException);
}
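Examples #15 and #23 both shut their workers down by queueing a sentinel value (an empty document or an empty fs::path) rather than interrupting. A compact self-contained sketch of that idiom, with a plain std::queue guarded by a mutex standing in for the original queue classes (the names echo Example #15 but the code is illustrative, not the original's):

#include <boost/thread.hpp>
#include <iostream>
#include <queue>
#include <string>

std::queue<std::string> msgQueue;
boost::mutex queueMutex;
boost::condition_variable queueCond;

void worker()
{
    for (;;) {
        boost::unique_lock<boost::mutex> lock(queueMutex);
        while (msgQueue.empty())
            queueCond.wait(lock);
        std::string item = msgQueue.front();
        if (item.empty())           // sentinel: leave it in place for the other workers
            return;
        msgQueue.pop();
        lock.unlock();
        std::cout << "processed " << item << "\n";
    }
}

int main()
{
    boost::thread_group workers;
    for (int i = 0; i < 4; ++i)
        workers.create_thread(&worker);

    {
        boost::lock_guard<boost::mutex> lock(queueMutex);
        msgQueue.push("a");
        msgQueue.push("b");
        msgQueue.push("");          // sentinel, pushed last so real items are processed first
    }
    queueCond.notify_all();

    workers.join_all();             // every worker exits once it reaches the sentinel
    return 0;
}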
Example #24
	void stop()
	{
		// orderly stop: wait for all worker threads to finish

		tg.join_all();
	}
Example #25
 ~Threadpool() {
     Logger::pLOG->info("Delete threadpool of DOR");
     _tg.interrupt_all();
     _tg.join_all();
 }