Example #1
// Standard headers used below; cs477::future, cs477::queue_work, is_prime,
// and get_num_digits come from course/project headers that are not shown.
#include <cstdio>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <thread>
#include <vector>

int main() {
    using primes = cs477::future<std::vector<int>>;
    clock_t t;

    const unsigned int NUM_VALUES = 10'000'000;
    const unsigned int NUM_THREADS = std::thread::hardware_concurrency() * 3;
    const unsigned int SECTION_SIZE = NUM_VALUES / NUM_THREADS;
    unsigned int temp = 0; //running block boundary

    std::vector<primes> f;
    t = clock();

    for (unsigned int i = 0; i < NUM_THREADS; ++i) {
        unsigned int first = temp;
        //advance temp to the top of this range; distribute the division
        //remainder (NUM_VALUES % NUM_THREADS) across the first threads
        temp += SECTION_SIZE + (i < (NUM_VALUES % NUM_THREADS) ? 1 : 0);
        unsigned int last = temp;

        auto section = cs477::queue_work([i, first, last] {
            std::vector<int> v;
            for (unsigned int k = first; k < last; ++k) {
                if (is_prime(k)) v.push_back(k);
            }
            return v;
        });
        std::cout << "Thread " << std::setw(get_num_digits(NUM_THREADS)) << std::left << i
                  << ": " << std::setw(get_num_digits(NUM_VALUES) - 1) << std::right << first
                  << " - " << std::setw(get_num_digits(NUM_VALUES)) << std::right << last << "\n";

        f.push_back(std::move(section));
    }

    printf("\n");

    auto all = when_all(f.begin(), f.end());

    int sec = 0;
    int total = 0;

    //step through each section to print per-thread counts and accumulate the overall total
    for (auto&& section : all.get()) {
        auto section_primes = section.get();
        int count = static_cast<int>(section_primes.size());
        total += count;
        std::cout << "Thread " << std::setw(get_num_digits(NUM_THREADS)) << std::left << sec++
                  << ": " << count << " primes" << std::endl;
    }

    t = clock() - t;
    printf("\nThere are %d total prime numbers under %u\nTime: %f seconds\n",
        total, NUM_VALUES, (float)t / CLOCKS_PER_SEC);

    return 0;
}
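
The example above leans on two helpers that are not shown. A minimal sketch of what they might look like, assuming a plain trial-division primality test and simple digit counting (both are assumptions, not the author's actual implementations):

// Hypothetical helper: trial-division primality test.
bool is_prime(unsigned int n) {
    if (n < 2) return false;
    for (unsigned int d = 2; d * d <= n; ++d)
        if (n % d == 0) return false;
    return true;
}

// Hypothetical helper: count of decimal digits, used only to align the output columns.
int get_num_digits(unsigned int n) {
    int digits = 1;
    while (n >= 10) { n /= 10; ++digits; }
    return digits;
}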
Example #2
 template <typename T>
 std::vector<lcos::future<T> >
 wait_all(lcos::future<T> f0, lcos::future<T> f1, lcos::future<T> f2,
     error_code& ec = throws)
 {
     typedef std::vector<lcos::future<T> > result_type;
     lcos::future<result_type> f = when_all(f0, f1, f2);
     if (!f.valid()) {
         HPX_THROWS_IF(ec, uninitialized_value, "lcos::wait_all",
             "lcos::when_all didn't return a valid future");
         return result_type();
     }
     return f.get(ec);
 }
Example #3
 template <typename R0, typename R1, typename R2>
 HPX_STD_TUPLE<lcos::future<R0>, lcos::future<R1>, lcos::future<R2> >
 wait_all(lcos::future<R0> f0, lcos::future<R1> f1, lcos::future<R2> f2,
     error_code& ec = throws)
 {
     typedef HPX_STD_TUPLE<lcos::future<R0>, lcos::future<R1>, lcos::future<R2> >
         result_type;
     lcos::future<result_type> f = when_all(f0, f1, f2, ec);
     if (!f.valid()) {
         HPX_THROWS_IF(ec, uninitialized_value, "lcos::wait_all",
             "lcos::when_all didn't return a valid future");
         return result_type();
     }
     return f.get(ec);
 }
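
Examples 2 and 3 follow the same pattern: forward the futures to when_all, check that the composite future is valid, then block in f.get(ec). A hedged usage sketch of the tuple overload, assuming futures created with hpx::async (illustrative only, not verified against this HPX version; the compute_* functions are hypothetical):

// Combine three futures of different types; wait_all blocks until all are ready.
lcos::future<int>         fi = hpx::async(&compute_int);
lcos::future<double>      fd = hpx::async(&compute_double);
lcos::future<std::string> fs = hpx::async(&compute_string);

auto ready = wait_all(fi, fd, fs);   // HPX_STD_TUPLE of three ready futures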
Example #4
int main(void)
{
#if !(defined(BOOST_MSVC) && BOOST_MSVC < 1700) // Don't bother with VS2010, its result_of can't cope.
    //[barrier_example
    // Assume that groups is 10,000 items long with item.first being randomly
    // between 1 and 500. This example is adapted from the barrier() unit test.
    //
    // What we're going to do is this: for each item in groups, schedule item.first
    // parallel ops and a barrier which completes only when the last of that
    // parallel group completes. Chain the next group to only execute after the
    // preceding group's barrier completes. Repeat until all groups have been executed.
    std::shared_ptr<boost::afio::async_file_io_dispatcher_base> dispatcher=
        boost::afio::make_async_file_io_dispatcher();
    std::vector<std::pair<size_t, int>> groups;
    boost::afio::atomic<size_t> callcount[10000];
    memset(&callcount, 0, sizeof(callcount));
    
    // This lambda is what each parallel op in each group will do: increment an atomic
    // for that group.
    auto inccount = [](boost::afio::atomic<size_t> *count){ (*count)++; };
    
    // This lambda is called after each barrier completes, and it checks that exactly
    // the right number of inccount lambdas were executed.
    auto verifybarrier = [](boost::afio::atomic<size_t> *count, size_t shouldbe)
    {
        if (*count != shouldbe)
            throw std::runtime_error("Count was not what it should have been!");
        return true;
    };
    
    // For each group, dispatch ops and a barrier for them
    boost::afio::async_io_op next;
    bool isfirst = true;
    for(auto &run : groups)
    {
        // Create a vector of run.first size of bound inccount lambdas
        // This will be the batch issued for this group
        std::vector<std::function<void()>> thisgroupcalls(run.first, std::bind(inccount, &callcount[run.second]));
        std::vector<boost::afio::async_io_op> thisgroupcallops;
        // If this is the first item, schedule without precondition
        if (isfirst)
        {
            thisgroupcallops = dispatcher->call(thisgroupcalls).second;
            isfirst = false;
        }
        else
        {
            // Create a vector of run.first size of preconditions exactly
            // matching the number in this batch. Note that the precondition
            // for all of these is the preceding verify op
            std::vector<boost::afio::async_io_op> dependency(run.first, next);
            thisgroupcallops = dispatcher->call(dependency, thisgroupcalls).second;
        }
        // barrier() is very easy: its number of output ops exactly matches its input
        // but none of the output will complete until the last of the input completes
        auto thisgroupbarriered = dispatcher->barrier(thisgroupcallops);
        // Schedule a call of the verify lambda once barrier completes. Here we choose
        // the first item of the barrier's return, but in truth any of them are good.
        auto verify = dispatcher->call(thisgroupbarriered.front(), std::function<bool()>(std::bind(verifybarrier, &callcount[run.second], run.first)));
        // Set the dependency for the next batch to be the just scheduled verify op
        next = verify.second;
    }
    // next was the last op scheduled, so waiting on it waits on everything
    when_all(next).wait();
    //]
#endif
}
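
The comment at the top assumes groups has already been filled with 10,000 entries, but the snippet never populates it. A minimal sketch of that setup using <random> (the exact distribution and indexing are assumptions based on the comment):

// Fill groups with 10,000 items: .first is a random batch size in [1, 500],
// .second indexes the matching callcount slot.
std::mt19937 gen(std::random_device{}());
std::uniform_int_distribution<size_t> dist(1, 500);
for (int n = 0; n < 10000; ++n)
    groups.push_back(std::make_pair(dist(gen), n));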
Example #5
int hpx_main(int argc, char* argv[])
{
    HPX_TEST_EQ(std::size_t(4), hpx::resource::get_num_threads());
    HPX_TEST_EQ(std::size_t(4), hpx::resource::get_num_thread_pools());
    HPX_TEST_EQ(std::size_t(0), hpx::resource::get_pool_index("default"));
    HPX_TEST_EQ(std::size_t(0), hpx::resource::get_pool_index("pool-0"));

    for (int i=0; i<max_threads; ++i) {
        std::string pool_name = "pool-"+std::to_string(i);
        HPX_TEST_EQ(pool_name , hpx::resource::get_pool_name(i));
        HPX_TEST_EQ(std::size_t(1), hpx::resource::get_num_threads(i));
    }

    // setup executors for different task priorities on the pools
    // segfaults or exceptions in any of the following will cause
    // the test to fail
    hpx::threads::scheduled_executor exec_0_hp =
        hpx::threads::executors::pool_executor("default",
        hpx::threads::thread_priority_high);

    hpx::threads::scheduled_executor exec_0 =
        hpx::threads::executors::pool_executor("default",
        hpx::threads::thread_priority_default);

    std::vector<hpx::future<void>> lotsa_futures;

    // use executors to schedule work on pools
    lotsa_futures.push_back(
        hpx::async(exec_0_hp, &dummy_task, 3, "HP default")
    );

    lotsa_futures.push_back(
        hpx::async(exec_0, &dummy_task, 3, "Normal default")
    );

    std::vector<hpx::threads::scheduled_executor> execs;
    std::vector<hpx::threads::scheduled_executor> execs_hp;
    //
    for (int i=0; i<max_threads; ++i) {
        std::string pool_name = "pool-"+std::to_string(i);
        execs.push_back(
            hpx::threads::executors::pool_executor(pool_name,
            hpx::threads::thread_priority_default)
        );
        execs_hp.push_back(
            hpx::threads::executors::pool_executor(pool_name,
            hpx::threads::thread_priority_high)
        );
    }

    for (int i=0; i<max_threads; ++i) {
        std::string pool_name = "pool-"+std::to_string(i);
        lotsa_futures.push_back(
            hpx::async(execs[i], &dummy_task, 3, pool_name + " normal")
        );
        lotsa_futures.push_back(
            hpx::async(execs_hp[i], &dummy_task, 3, pool_name + " HP")
        );
    }

    // check that the default executor still works
    hpx::parallel::execution::default_executor large_stack_executor(
        hpx::threads::thread_stacksize_large);

    lotsa_futures.push_back(
        hpx::async(large_stack_executor, &dummy_task, 3, "true default + large stack")
    );

    // just wait until everything is done
    when_all(lotsa_futures).get();

    return hpx::finalize();
}
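
dummy_task is defined elsewhere in the test. A plausible stand-in, with the signature inferred from the call sites above (hypothetical, not the test's actual definition):

// Burns a little time on whichever pool/executor ran it and reports progress.
void dummy_task(int n, std::string text)
{
    for (int i = 0; i < n; ++i) {
        hpx::this_thread::sleep_for(std::chrono::milliseconds(100));
        std::cout << text << ": iteration " << i << std::endl;
    }
}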
Example #6
template <typename Iterator>
auto when_all(Iterator begin, Iterator end) {
  return when_all(detail::range::persist_range(begin, end));
}
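
This overload adapts an iterator pair to the container-based when_all by first persisting the range into owned storage, so the futures stay alive until the combined operation completes. A usage sketch, where future_t and make_ready_future are placeholders for whatever future type the surrounding library provides (not its verified API):

// Gather a dynamic number of futures, then await them as one group.
std::vector<future_t<int>> futures;
for (int i = 0; i < 4; ++i)
    futures.push_back(make_ready_future(i));

auto all = when_all(futures.begin(), futures.end());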