void LandingTabuSearch<TTabuSearch, TSolution>::run(const size_t numberOfSteps)
{
    // Guard: with no landings there is nothing to do, and the original code
    // underflowed `numberOfLandings - 1` below (size_t wrap to SIZE_MAX).
    if (numberOfLandings == 0)
    {
        return;
    }

    // 1. create random solutions via randomWalk procedure
    std::srand(unsigned (time(0)));
    std::vector<TSolution> randomSolutions;
    randomSolutions.reserve(numberOfLandings);
    for (size_t i = 0; i < numberOfLandings; ++i)
    {
        randomSolutions.emplace_back(randomWalk(bestSolution, depth, data.getNumberOfServers(), data.getNumberOfDisks()));
    }

    // 2. distribute random solutions: each task gets its own copy of the
    //    tabu search (by value), seeds it with one random solution and
    //    writes the best result back into the same slot — slots are
    //    disjoint, so no synchronization is needed.
    auto task = [&randomSolutions, numberOfSteps](TTabuSearch tabuSearch, size_t idx) -> void
    {
        // Validate the index BEFORE using it (the original asserted after
        // the first access; `idx >= 0` was dropped — always true for size_t).
        assert(idx < randomSolutions.size());
        tabuSearch.setStartSolution(randomSolutions[idx]);
        tabuSearch.run(numberOfSteps);
        randomSolutions[idx] = tabuSearch.getBestSolution();
    };

    // All but the last landing go to the worker pool; the last runs on the
    // current thread so it is not left idle.
    std::vector<std::future<void>> futures(numberOfLandings - 1);
    for (size_t i = 0; i < futures.size(); ++i)
    {
        futures[i] = scheduler->schedule(task, tabuSearch, i);
    }
    // BUGFIX: seed the main-thread search with the last random solution;
    // previously that solution was generated and then immediately
    // overwritten without ever being used.
    tabuSearch.setStartSolution(randomSolutions.back());
    tabuSearch.run(numberOfSteps);
    randomSolutions.back() = tabuSearch.getBestSolution();

    // 3. gather results and choose the best solution
    for (auto& future : futures) { future.get(); }
    auto best = std::min_element(randomSolutions.begin(), randomSolutions.end());
    bestSolution = *best;
}
Example #2
0
// Applies f to every element of [first, last), splitting the range into
// blocks of at least 25 elements and running each block on its own thread.
// The final (remainder) block runs on the calling thread. join_threads is an
// RAII joiner, so the workers are joined even if an exception escapes; the
// futures exist solely so worker exceptions can be rethrown via get().
void parallel_for_each_v1(Iterator first, Iterator last, Func f)
{
   const unsigned long length = std::distance(first, last);

   // Empty range: nothing to do.
   if(!length)
      return;

   // Upper bound on useful threads: one per 25-element block.
   const unsigned long min_per_thread = 25;
   const unsigned long max_threads = 
      (length + min_per_thread - 1)/min_per_thread;

   // hardware_concurrency() may report 0 (unknown) — fall back to 2.
   const unsigned long hardware_threads = std::thread::hardware_concurrency();
   const unsigned long num_threads = std::min(hardware_threads != 0 ? hardware_threads : 2, max_threads);
   const unsigned long block_size = length/num_threads;

   std::vector<std::future<void> > futures(num_threads - 1);
   std::vector<std::thread> threads(num_threads - 1);
   // RAII guard: joins all worker threads on scope exit, including during
   // stack unwinding.
   join_threads joiner(threads);

   Iterator block_start = first;
   for(unsigned long i = 0; i < num_threads - 1; ++i)
   {
      Iterator block_end = block_start;
      std::advance(block_end, block_size);
      // packaged_task supplies a future so worker exceptions are observable.
      std::packaged_task<void(void)> task([=]{std::for_each(block_start, block_end, f);});
      futures[i] = task.get_future();
      threads[i] = std::thread(std::move(task));
      block_start = block_end;
   }
   // Remainder block [block_start, last) handled on the calling thread.
   std::for_each(block_start, last, f);
   // Rethrows the first stored worker exception, if any.
   for(unsigned long i = 0; i < num_threads - 1; ++i)
      futures[i].get();
}
// Reduces [first, last) with operator+= starting from `init`, splitting the
// work into fixed-size 25-element chunks that run on a boost thread pool.
// The last (possibly shorter) chunk is reduced on the calling thread.
T parallel_accumulate(Iterator first,Iterator last,T init)
{
    // Total number of elements; an empty range simply yields `init`.
    unsigned long const total = static_cast<unsigned long>(std::distance(first, last));
    if (total == 0)
        return init;

    unsigned long const chunk = 25;
    unsigned long const chunks = (total + chunk - 1) / chunk;

    // One future per asynchronous chunk (all but the final one).
    boost::csbl::vector<boost::future<T> > pending(chunks - 1);
    boost::basic_thread_pool pool;

    Iterator cursor = first;
    for (unsigned long i = 0; i + 1 < chunks; ++i)
    {
        Iterator stop = cursor;
        std::advance(stop, chunk);
        pending[i] = boost::async(pool, accumulate_block<Iterator,T>(), cursor, stop);
        cursor = stop;
    }
    // Final chunk [cursor, last) on this thread while the pool works.
    T tail = accumulate_block<Iterator,T>()(cursor, last);

    // Combine: seed with init, add each asynchronous partial, then the tail.
    T total_sum = init;
    for (unsigned long i = 0; i + 1 < chunks; ++i)
    {
        total_sum += pending[i].get();
    }
    total_sum += tail;
    return total_sum;
}
Example #4
0
// Launches N concurrent map_function(id) jobs (id = 0..N-1, one per future
// slot) and folds all their results together with sum_function.
result_type sum_approach1(int const N) {
   QVector<QFuture<result_type>> futures(N);
   for (int id = 0; id < futures.size(); ++id)
      futures[id] = QtConcurrent::run(map_function, id);
   return std::accumulate(futures.cbegin(), futures.cend(), result_type{}, sum_function);
}
Example #5
0
// Reduces [first, last) from `init`, splitting the range into fixed-size
// blocks of 25 elements submitted to a thread pool; the final (possibly
// shorter) block is reduced on the calling thread.
T parallel_accumulate(Iterator first, Iterator last, T init)
{
    unsigned long const length = std::distance(first, last);

    // Empty range: nothing to reduce.
    if (!length) {
        return init;
    }

    unsigned long const block_size = 25;
    unsigned long const num_blocks = (length + block_size - 1) / block_size;

    std::vector<std::future<T> > futures(num_blocks - 1);
    thread_pool pool;

    Iterator block_start = first;

    // BUGFIX: loop bound was `num_threads`, a name that is never declared in
    // this function — it must be `num_blocks`.
    for (unsigned long i = 0; i < (num_blocks - 1); ++i) {
        Iterator block_end = block_start;
        std::advance(block_end, block_size);
        // BUGFIX: the task was submitted without its iterator range, so every
        // block would have been reduced over nothing; bind the range into the
        // submitted callable.
        futures[i] = pool.submit([block_start, block_end] {
            return accumulate_block<Iterator, T>()(block_start, block_end);
        });
        block_start = block_end;
    }

    // BUGFIX: accumulate_block is a class template and needs its template
    // arguments here, as in the submissions above.
    T last_result = accumulate_block<Iterator, T>()(block_start, last);
    T result = init;

    for (unsigned long i = 0; i < (num_blocks - 1); ++i) {
        result += futures[i].get();
    }

    result += last_result;
    return result;
}
Example #6
0
	// Renders the world by tiles: one async worker per configured thread
	// pulls tasks from the shared dispatcher, and every returned tile is
	// pasted into the final image at its task's start position.
	Image
	ParallelRenderer::render (World& _world, Settings& _settings, Engine&
	    _engine, SuperSampling& _super_sampling)
	{
		TaskDispatcher task_dispatcher(_settings);
		// Reserve up front so the vector never reallocates while the
		// workers are being launched (previously sized 0 and grown).
		std::vector<std::future<Tiles>> futures;
		futures.reserve(_settings.max_thread_count);

		for (unsigned i = 0; i < _settings.max_thread_count; i++)
		{
			futures.push_back(std::async(std::launch::async, [this, &task_dispatcher,
			    &_world, &_settings, &_engine, &_super_sampling] () { 
					return worker(task_dispatcher, _world, _settings, _engine,
					_super_sampling); }));
		}

		Image final_image(_settings.area.size);
		for (unsigned i = 0; i < futures.size(); i++)
		{
			// get() already blocks until the worker finishes, so the
			// original separate wait() pass over all futures was redundant
			// and has been removed.
			Tiles tiles = futures[i].get();
			for (unsigned j = 0; j < tiles.size(); j++)
			{
				final_image.paste(tiles[j].task.start, tiles[j].image);
			}
		}

		return final_image;
	}
//------------------------------------------------------------------------------
// Entry point: spawns worker threads that idle while `wait` is true, prints
// a report on them, then releases them and blocks until all have finished.
int main(int argc, char** argv) {
    // Optional first argument selects the thread count; 0 lets
    // createthreads choose its own default — TODO confirm against
    // createthreads' implementation.
    const int numthreads = argc > 1 ? atoi(argv[1]) : 0;
    bool wait = true;
    // NOTE(review): `wait` is presumably shared with the worker threads by
    // reference; if they poll it concurrently, a plain bool is a data race
    // (std::atomic<bool> would be needed). Verify inside createthreads.
    Futures futures(createthreads(wait, numthreads));
    std::cout << threadreport() << std::endl;
    // Release the workers, then rendezvous on all futures.
    wait = false;    
    barrier(futures.begin(), futures.end());
    return 0;
}
Example #8
0
// Runs the given test files split across `jobs` asynchronous tasks and
// returns the combined per-file results.
//
// files  - paths to test; an empty list yields an empty result set.
// report - shared report object, passed by reference to every task.
// jobs   - requested parallelism; 0 is treated as 1, and a job count larger
//          than the file count collapses to a single job.
result_list runner::test_parallel(std::vector<runner::path_type> const & files, report_type & report, std::size_t jobs) const
{
    result_list results;

    if (files.empty())
    {
        return results;
    }

    if (jobs == 0)
    {
        jobs = 1;
    }

    std::size_t chunk_size = files.size() / jobs;

    // Fewer files than jobs: run everything as one chunk on a single job.
    if (chunk_size == 0)
    {
        chunk_size = files.size();
        jobs = 1;
    }

    // A single job runs deferred (on this thread, at get()); multiple jobs
    // run truly asynchronously.
    std::launch launch(jobs == 1 ? std::launch::deferred : std::launch::async);
    std::vector<std::future<result_list>> futures(jobs);
    // Shared failure counter — atomic because it is handed by reference to
    // every task (presumably incremented inside test_range).
    std::atomic<std::size_t> fail_count(0);

    for (std::size_t i = 0; i < jobs; i++)
    {
        files_iterator begin(files.begin() + i * chunk_size);
        files_iterator end(files.begin() + (i + 1) * chunk_size);

        // Handle remainder of files.size() / jobs
        if (i == jobs - 1)
        {
            end = files.end();
        }

        futures[i] = std::async(launch, &runner::test_range, this, begin, end, std::ref(report), std::ref(fail_count));
    }

    // get() blocks until each task finishes and rethrows any stored
    // exception; results are moved, not copied, into the combined list.
    for (auto & f : futures)
    {
        result_list r = f.get();
        std::move(r.begin(), r.end(), std::back_inserter(results));
    }

    return results;
}
Example #9
0
// Loads every sample file concurrently, one std::async task per sample, and
// returns the decoded buffers in the same order as `_samples`.
// Each task writes only buffers[i], so the tasks need no synchronization.
std::vector<AudioBuffer> SoundFX::load_samples(const AudioSpec &_spec, const samples_t &_samples)
{
	std::vector<std::future<void>> futures(_samples.size());
	std::vector<AudioBuffer> buffers(_samples.size());
	for(unsigned i=0; i<_samples.size(); ++i) {
		futures[i] = std::async(std::launch::async, [_spec,i,&buffers,&_samples]() {
			PINFOF(LOG_V1, LOG_AUDIO, "loading %s for %s sound fx\n",
					_samples[i].file, _samples[i].name);
			load_audio_file(_samples[i].file, buffers[i], _spec);
		});
	}
	for(unsigned i=0; i<_samples.size(); ++i) {
		// BUGFIX: use get() instead of wait() — wait() leaves any exception
		// thrown by load_audio_file stored in the future, where it was
		// silently discarded on destruction; get() rethrows it to the caller.
		futures[i].get();
	}
	return buffers;
}
Example #10
0
// Reduces [first, last) from `init` by splitting the range evenly across a
// bounded number of worker threads (at least 25 elements each); the final
// remainder block is reduced on the calling thread, workers are joined, and
// the partial results are combined in order.
T parallel_accumulate(Iterator first,Iterator last,T init)
{
    unsigned long const length=std::distance(first,last);

    // Empty range: nothing to reduce.
    if(!length)
        return init;

    // Thread count: enough that each worker gets >= min_per_thread elements,
    // capped by hardware concurrency (fallback 2 when it reports 0/unknown).
    unsigned long const min_per_thread=25;
    unsigned long const max_threads=
        (length+min_per_thread-1)/min_per_thread;

    unsigned long const hardware_threads=
        std::thread::hardware_concurrency();

    unsigned long const num_threads=
        std::min(hardware_threads!=0?hardware_threads:2,max_threads);

    unsigned long const block_size=length/num_threads;

    std::vector<std::future<T> > futures(num_threads-1);
    std::vector<std::thread> threads(num_threads-1);

    // Launch num_threads-1 workers; the remainder block stays on this thread.
    Iterator block_start=first;
    for(unsigned long i=0;i<(num_threads-1);++i)
    {
        Iterator block_end=block_start;
        std::advance(block_end,block_size);
        std::packaged_task<T(Iterator,Iterator)> task(
            accumulate_block<Iterator,T>());
        futures[i]=task.get_future();
        threads[i]=std::thread(std::move(task),block_start,block_end);
        block_start=block_end;
    }
    // BUGFIX: accumulate_block is a class template and needs its template
    // arguments (as it is written everywhere else in this file);
    // `accumulate_block()` did not compile.
    T last_result=accumulate_block<Iterator,T>()(block_start,last);

    std::for_each(threads.begin(),threads.end(),
                  std::mem_fn(&std::thread::join));

    // Combine in block order; get() rethrows any stored worker exception.
    T result=init;
    for(unsigned long i=0;i<(num_threads-1);++i)
    {
        result+=futures[i].get();
    }
    result += last_result;
    return result;
}
Example #11
0
// Fires the same icon request from several pool threads at once; every
// request must resolve to the identical, valid pixmap path.
void FavIconTest::concurrentRequestsShouldWork()
{
    const int numThreads = 3;
    QThreadPool tp;
    tp.setMaxThreadCount(numThreads);

    // One future per concurrent request, all targeting the same URL fetcher.
    QVector<QFuture<QString>> futures(numThreads);
    for (auto &future : futures) {
        future = QtConcurrent::run(&tp, getAltIconUrl);
    }
    // Give the pool up to a minute to drain.
    QVERIFY(tp.waitForDone(60000));

    // All results must agree with the first one...
    const QString firstResult = futures.at(0).result();
    for (int idx = 1; idx < numThreads; ++idx) {
        QCOMPARE(futures.at(idx).result(), firstResult);
    }
    // ...and the agreed-upon result must be a loadable pixmap.
    QVERIFY(!QPixmap(firstResult).isNull());
}
Example #12
0
// Launches 100 asynchronous tasks that each atomically increment a shared
// counter and return the new value, then prints the 100 results as a
// comma-separated list.
int main(int argc, char* argv[])
{
  std::atomic<size_t> completionCount;
  completionCount = 0;

  std::vector<std::future<size_t>> futures(100);
  for (auto& fut : futures)
  {
    fut = std::async([&completionCount]()
                     { return ++completionCount; });
  }

  // Separator goes before every item but the first — same output as
  // printing it after every item but the last.
  bool firstItem = true;
  for (auto& fut : futures)
  {
    if (!firstItem) std::cout << ", ";
    std::cout << fut.get();
    firstItem = false;
  }
  std::cout << std::endl;
}
Example #13
0
// Runs fn(i, cpu) for every i in [begin, end), distributing indices across
// worker threads through a shared atomic counter (dynamic load balancing).
// `cpu` is the index of the worker executing that iteration.
void par_for(int begin, int end, F fn) {
  // Empty or inverted range: nothing to do. Without this guard an inverted
  // range made `end - begin` negative, yielding a negative thread count and
  // a vector constructed with a bogus (huge) size.
  if (begin >= end) return;

  std::atomic<int> idx;
  idx = begin;

  int num_cpus = std::thread::hardware_concurrency();
  // BUGFIX: hardware_concurrency() may return 0 (unknown); the original then
  // launched zero workers and silently never called fn at all.
  if (num_cpus == 0) num_cpus = 1;
  num_cpus = std::min(num_cpus, end - begin);

  std::vector<std::future<void>> futures(num_cpus);
  for (int cpu = 0; cpu != num_cpus; ++cpu) {
    futures[cpu] = std::async(
      std::launch::async,
      [cpu, &idx, end, &fn]() {
        // Claim indices until the counter passes the end of the range.
        for (;;) {
          int i = idx++;
          if (i >= end) break;
          fn(i, cpu);
        }
      }
    );
  }
  // get() blocks until each worker finishes and rethrows any exception
  // thrown by fn.
  for (int cpu = 0; cpu != num_cpus; ++cpu) {
    futures[cpu].get();
  }
}
Example #14
0
File: main.cpp Project: CCJY/coliru
// Applies `callback` to every element of [rangeStart, rangeEnd), splitting
// the range into numSegments contiguous segments, each processed by its own
// std::async task. numSegments <= 0 (the default) means one segment per
// element; the last segment absorbs any remainder.
void parallel_for_each(I rangeStart, I rangeEnd, F callback, int numSegments = 0)
{
    int numValues = std::distance(rangeStart,rangeEnd);
    // BUGFIX: bail out on an empty range — with the default numSegments the
    // original computed numValues/numSegments as 0/0 (division by zero/UB).
    if (numValues == 0)
        return;

    numSegments = numSegments > 0 ? numSegments : numValues;
    int segmentSize = numValues/numSegments;
    
    std::vector<std::future<void>> futures(numSegments);
    int segment = 0;
    for (auto &future: futures)
    {
        // Capture the loop's current `segment` by value; callback by
        // reference so a stateful functor is shared, not copied.
        future = std::async(std::launch::async, [=,&callback](){
            auto segmentStart = rangeStart + segment*segmentSize;
            auto segmentEnd =  segment+1 < numSegments ? rangeStart + (segment+1)*segmentSize : rangeEnd;
    
            for (auto i = segmentStart; i != segmentEnd; ++i) callback(*i);
        });
        ++segment;
    }
    
    // NOTE: wait() leaves any exception thrown by callback stored in the
    // future (discarded on destruction); callers never see it.
    for (auto &future: futures)
    {
        future.wait();
    }
}
Example #15
0
// Tunes heuristic parameters with a genetic-algorithm-style search: a
// population of parameter sets is evolved by tournament selection, averaging
// (crossover) of two winners, and mutation, while plays are evaluated
// asynchronously. `latency` jobs are kept in flight at once via a circular
// buffer of futures; results feed back into the population `latency`
// iterations after the job started.
void PlayTest_Tune(unsigned int rollbacks, unsigned int plays, unsigned int population_size, unsigned int tournament_size, unsigned int latency) {
	std::mt19937 rng(RandomSeed());

	// create initial population
	std::vector<TuneElement> population(population_size);
	for(unsigned int i = 0; i < population_size; ++i) {
		GetDefaultHeuristicParameters(&population[i].m_parameters);
		population[i].m_score = 20000; // guess, should be relatively low
	}

	// simulate plays
	std::vector<TuneElement> history(plays);
	// futures[p % latency] holds the job started `latency` iterations ago,
	// so each new job reuses the slot that was just harvested.
	std::vector<std::future<unsigned int> > futures(latency);
	// plays + latency iterations: the final `latency` passes only drain the
	// jobs still in flight after the last play has been started.
	for(unsigned int p = 0; p < plays + latency; ++p) {
		std::cout << "Tune progress: " << 100 * p / (plays + latency) << "%" << std::endl;

		// add completed play to the population
		if(p >= latency) {
			history[p - latency].m_score = futures[p % latency].get();
			population[(p - latency) % population_size] = history[p - latency];
		}

		// tournament selection
		// Two independent tournaments over `tournament_size` random entries;
		// the higher m_score wins. Scores start at 0 so any sampled entry
		// beats the freshly-defaulted candidates.
		TuneElement best1, best2;
		GetDefaultHeuristicParameters(&best1.m_parameters);
		GetDefaultHeuristicParameters(&best2.m_parameters);
		best1.m_score = 0;
		best2.m_score = 0;
		for(unsigned int t = 0; t < tournament_size; ++t) {
			unsigned int sel1 = rng() % population_size;
			if(population[sel1].m_score > best1.m_score)
				best1 = population[sel1];
			unsigned int sel2 = rng() % population_size;
			if(population[sel2].m_score > best2.m_score)
				best2 = population[sel2];
		}

		// create winner
		// Crossover: per-parameter average of the two tournament winners,
		// with (rng() & 1) supplying unbiased rounding of the halved sum.
		HeuristicParameters winner;
		std::cout << "Winner (" << best1.m_score << "|" << best2.m_score << "): ";
		for(unsigned int i = 0; i < PARAM_COUNT; ++i) {
			winner.m_values[i] = (best1.m_parameters.m_values[i] + best2.m_parameters.m_values[i] + (rng() & 1)) / 2;
			std::cout << winner.m_values[i] << " ";
		}
		std::cout << std::endl;

		// Only the first `plays` iterations start new jobs; later passes
		// just drain outstanding futures.
		if(p < plays) {

			// do some mutations
			for(unsigned int i = 0; i < PARAM_COUNT; ++i) {
				winner.m_values[i] = Mutate(winner.m_values[i], PARAMETERS_MIN[i], PARAMETERS_MAX[i], PARAMETERS_STEP[i], rng);
			}

			// start the job
			history[p].m_parameters = winner;
			futures[p % latency] = StartJob(PlayTest, winner, rollbacks, (BoardDB*) NULL, (std::mutex*) NULL);

		}

	}

	// Dump all play scores as a NumPy-style array literal, 20 per line.
	std::cout << "scores = array([\n\t";
	for(unsigned int p = 0; p < plays; ++p) {
		std::cout << history[p].m_score;
		if(p != plays - 1) {
			if(p % 20 == 19)
				std::cout << ",\n\t";
			else
				std::cout << ", ";
		}
	}
	std::cout << "])" << std::endl;

	// calculate population average
	// Rounded integer mean of each parameter across the final population.
	HeuristicParameters population_average;
	std::cout << "Population average: ";
	for(unsigned int i = 0; i < PARAM_COUNT; ++i) {
		population_average.m_values[i] = 0;
		for(unsigned int p = 0; p < population_size; ++p) {
			population_average.m_values[i] += population[p].m_parameters.m_values[i];
		}
		population_average.m_values[i] = (population_average.m_values[i] + population_size / 2) / population_size;
		std::cout << population_average.m_values[i] << " ";
	}
	std::cout << std::endl;

}