Example #1
void* consumer_routine(void* arg) {
    // notify about start
    _threads.get(CONSUMER)->set_cancel_state_enabled(false);
    _threads.get(CONSUMER)->notify_thread_started(&_mutex);

    // allocate value for result
    int * sum = new int(0);
    // for every update issued by producer, read the value and add to sum
    while (true) {
        _exchanger.start_exchanging(&_mutex);
        _exchanger.wait_for_exchange_started(&_mutex);
        if (!_threads.get(PRODUCER)->is_running()) {
            _exchanger.exchange();
            _exchanger.end_exchanging(&_mutex);
            break;
        }

        *sum += ((Value *) arg)->get();
        _exchanger.exchange();
        _exchanger.end_exchanging(&_mutex);
    }

    _threads.get(CONSUMER)->set_cancel_state_enabled(true);

    // return pointer to result
    _threads.get(CONSUMER)->notify_thread_stopped(&_mutex);

    return sum;
}
Example #2
int run_threads() {
    // start the three threads and wait until they're done

    std::unique_ptr<Value> value(new Value());

    pthread_mutex_init(&_mutex, nullptr);

    _threads.create_thread(PRODUCER, (void *) producer_routine, (void *) value.get());
    _threads.create_thread(CONSUMER, (void *) consumer_routine, (void *) value.get());
    _threads.create_thread(INTERRUPTOR, (void *) consumer_interruptor_routine, nullptr);

    _threads.run_threads();

    // collect the sum of update values seen by the consumer;
    // the consumer returns a heap-allocated int* through pthread_join
    void * consumer_result = nullptr;

    pthread_join(_threads.get(PRODUCER)->get(), nullptr);
    pthread_join(_threads.get(CONSUMER)->get(), &consumer_result);
    pthread_join(_threads.get(INTERRUPTOR)->get(), nullptr);

    pthread_mutex_destroy(&_mutex);

    int sum = *static_cast<int *>(consumer_result);
    delete static_cast<int *>(consumer_result);
    return sum;
}
Example #3
void* consumer_interruptor_routine(void* arg) {
    // wait for consumer to start
    _threads.get(CONSUMER)->wait_for_running(&_mutex);

    // interrupt consumer while producer is running
    while (_threads.get(PRODUCER)->is_running()) {
        _threads.get(CONSUMER)->cancel();
    }

    _threads.get(INTERRUPTOR)->notify_thread_stopped(&_mutex);

    return nullptr;
}
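The cancel() and set_cancel_state_enabled() calls used by the interruptor and by the consumer in Example #1 are not shown in these snippets. They are most likely thin wrappers over the pthread cancellation API; the sketch below is written under that assumption (the thread_wrapper class name and handle_ member are hypothetical). This also explains why the consumer disables cancellation for the duration of its loop: with PTHREAD_CANCEL_DISABLE in effect, the interruptor's cancel requests stay pending instead of killing the consumer mid-update.

#include <pthread.h>

// Hypothetical wrapper; the real class in the original project may differ.
void thread_wrapper::set_cancel_state_enabled(bool enabled) {
    // Affects the calling thread, which is why the consumer calls it on itself.
    int old_state = 0;
    pthread_setcancelstate(
        enabled ? PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE, &old_state);
}

void thread_wrapper::cancel() {
    pthread_cancel(handle_);  // handle_: the wrapped pthread_t
}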
Example #4
    session(
        thread_pool & main_pool,
        thread_pool & send_pool,
        thread_pool & recv_pool,
        const HandlerType & err_handler)

        : socket_(main_pool.io_service()),
          send_pool_(send_pool),
          recv_pool_(recv_pool),
          reply_strand_(send_pool.io_service()),
          request_strand_(recv_pool.io_service()),
          error_handler_(err_handler)
    {
    }
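Only the constructor of this session type is shown. As a hypothetical illustration (the request_buf_ member and handle_request handler are assumed, not taken from the original source), such a session would typically start its read loop wrapped in request_strand_ so request handling is serialized on recv_pool, while replies would be wrapped in reply_strand_ on send_pool:

    void start()
    {
        boost::asio::async_read_until(
            socket_, request_buf_, '\n',
            request_strand_.wrap(
                boost::bind(&session::handle_request, this,
                            boost::asio::placeholders::error)));
    }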
Example #5
 void run_synchronous(MemberFunction member_fun, const vertex_set& vset) {
   shared_lvid_counter = 0;
   if (threads.size() <= 1) {
     (this->*(member_fun))(0, vset);
   }
   else {
     // launch the initialization threads
     for(size_t i = 0; i < threads.size(); ++i) {
       boost::function<void(void)> invoke = boost::bind(member_fun, this, i, vset);
       threads.launch(invoke, i);
     }
   }
   // Wait for all threads to finish
   threads.join();
   rmi.barrier();
 } // end of run_synchronous
Example #6
    void thread_data::operator delete(void *p, thread_pool& pool)
    {
        if (0 != p)
        {
#ifdef HPX_DEBUG_THREAD_POOL
            using namespace std;    // some systems have memset in namespace std
            memset (p, freed_value, sizeof(thread_data));
#endif
            pool.deallocate(static_cast<thread_data*>(p));
        }
    }
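A two-argument operator delete like the one above exists to match a placement operator new taking the same extra argument; the compiler invokes it automatically when a constructor called through that placement new throws, and code can also call it explicitly. A sketch of the assumed counterpart (pool.allocate() is the presumed mirror of the deallocate() call above, not confirmed by this excerpt):

    void* thread_data::operator new(std::size_t size, thread_pool& pool)
    {
        (void) size;              // the pool hands out fixed-size thread_data slots
        return pool.allocate();   // assumed counterpart to pool.deallocate()
    }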
Example #7
void* producer_routine(void* arg) {
    // Wait for consumer to start
    _threads.get(CONSUMER)->wait_for_running(&_mutex);
    _threads.get(PRODUCER)->notify_thread_started(&_mutex);
    // Read data: for each value from stdin, publish it to the shared Value,
    // notify the consumer, and wait for the consumer to finish processing it
    int value = 0;
    while (std::cin >> value) {
        _exchanger.start_exchanging(&_mutex);
        _exchanger.init_exchanging();
        ((Value *) arg)->update(value);
        _exchanger.wait_for_exchange_finished(&_mutex);
        _exchanger.end_exchanging(&_mutex);
    }

    _exchanger.start_exchanging(&_mutex);
    _exchanger.init_exchanging();

    _threads.get(PRODUCER)->unsafe_notify_thread_stopped(&_mutex);

    _exchanger.wait_for_exchange_finished(&_mutex);
    _exchanger.end_exchanging(&_mutex);

    return nullptr;
}
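The _exchanger object shared by the consumer (Example #1) and the producer (Example #7) is not included in these snippets. Below is a minimal sketch of one way it could be implemented on top of the shared pthread mutex and a condition variable; the method names follow the call sites above, while the internal started/finished flags are assumptions.

#include <pthread.h>

class exchanger {
public:
    exchanger() : started_(false), finished_(false) {
        pthread_cond_init(&cond_, nullptr);
    }
    ~exchanger() { pthread_cond_destroy(&cond_); }

    // Both sides bracket one exchange with the shared mutex.
    void start_exchanging(pthread_mutex_t * mutex) { pthread_mutex_lock(mutex); }
    void end_exchanging(pthread_mutex_t * mutex) { pthread_mutex_unlock(mutex); }

    // Producer side: announce that a new value has been published.
    void init_exchanging() {
        started_ = true;
        pthread_cond_broadcast(&cond_);
    }

    // Consumer side: block until the producer has published a value.
    void wait_for_exchange_started(pthread_mutex_t * mutex) {
        while (!started_) pthread_cond_wait(&cond_, mutex);
    }

    // Consumer side: acknowledge the value and wake the producer.
    void exchange() {
        started_ = false;
        finished_ = true;
        pthread_cond_broadcast(&cond_);
    }

    // Producer side: block until the consumer has acknowledged.
    void wait_for_exchange_finished(pthread_mutex_t * mutex) {
        while (!finished_) pthread_cond_wait(&cond_, mutex);
        finished_ = false;
    }

private:
    bool started_;
    bool finished_;
    pthread_cond_t cond_;
};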
Example #8
    void task()
    {
        dlog << LINFO << "task start";

        future<int> var;

        var = 1;

        // Here we ask the thread pool to call this->subtask() and this->subtask2().
        // Note that calls to add_task() will return immediately if there is an 
        // available thread to hand the task off to.  However, if there isn't a 
        // thread ready then add_task() blocks until there is such a thread.
        // Also note that since task() is executed within the thread pool (see main() below)
        // calls to add_task() will execute the requested task within the calling thread
        // in cases where the thread pool is full.  This means it is always safe to 
        // spawn subtasks from within another task, which is what we are doing here.
        tp.add_task(*this,&test::subtask,var); // schedule call to this->subtask(var) 
        tp.add_task(*this,&test::subtask2);    // schedule call to this->subtask2() 

        // Since var is a future, this line will wait for the test::subtask task to 
        // finish before allowing us to access the contents of var.  Then var will 
        // return the integer it contains.  In this case result will be assigned 
        // the value 2 since var was incremented by subtask().
        int result = var;
        // print out the result
        dlog << LINFO << "var = " << result;

        // Wait for all the tasks we have started to finish.  Note that
        // wait_for_all_tasks() only waits for tasks which were started 
        // by the calling thread.  So you don't have to worry about other 
        // unrelated parts of your application interfering.  In this case
        // it just waits for subtask2() to finish.
        tp.wait_for_all_tasks();

        dlog << LINFO << "task end" ;
    }
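The subtask() and subtask2() members scheduled above are not part of this excerpt. A sketch consistent with the comments and with the sample log trace in Example #11 ("subtask end" after roughly 200 ms, "subtask2 end" after roughly 300 ms, var = 2) would be:

    void subtask(int& a)
    {
        dlib::sleep(200);
        a = a + 1;                          // var starts at 1, so the result is 2
        dlog << LINFO << "subtask end";
    }

    void subtask2()
    {
        dlib::sleep(300);
        dlog << LINFO << "subtask2 end";
    }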
Example #9
 void join_all( )
 {
     io_->join_all( );
     if(!same_) rpc_->join_all( );
 }
Example #10
 void stop_all( )
 {
     io_->stop( );
     if( !same_ ) rpc_->stop( );
 }
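As a hypothetical usage note (the pool_manager name is assumed), the two helpers above are typically paired at shutdown: stop the io_services first so their run loops return, then join the worker threads.

    pool_manager pools;
    // ... schedule work on the io and rpc pools ...
    pools.stop_all();
    pools.join_all();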
Example #11
int main()
{
    // tell the logger to print out everything
    dlog.set_level(LALL);


    dlog << LINFO << "schedule a few tasks";

    test mytask;
    // Schedule the thread pool to call mytask.task().  Note that all forms of add_task()
    // pass in the task object by reference.  This means you must make sure, in this case,
    // that mytask isn't destructed until after the task has finished executing.
    tp.add_task(mytask, &test::task);

    // You can also pass task objects to a thread pool by value.  So in this case we don't
    // have to worry about keeping our own instance of the task.  Here we construct a temporary 
    // add_value object and pass it right in and everything works like it should.
    future<int> num = 3;
    tp.add_task_by_value(add_value(7), num);  // adds 7 to num
    int result = num.get();
    dlog << LINFO << "result = " << result;   // prints result = 10





// uncomment this line if your compiler supports the new C++0x lambda functions
//#define COMPILER_SUPPORTS_CPP0X_LAMBDA_FUNCTIONS
#ifdef COMPILER_SUPPORTS_CPP0X_LAMBDA_FUNCTIONS

    // In the above examples we had to explicitly create task objects which is
    // inconvenient.  If you have a compiler which supports C++0x lambda functions
    // then you can use the following simpler method.

    // make a task which will just log a message
    tp.add_task_by_value([](){
                         dlog << LINFO << "A message from a lambda function running in another thread."; 
                         });

    // Here we make 10 different tasks, each assigns a different value into 
    // the elements of the vector vect.
    std::vector<int> vect(10);
    for (unsigned long i = 0; i < vect.size(); ++i)
    {
        // Make a lambda function which takes vect by reference and i by value.  So what
        // will happen is each assignment statement will run in a thread in the thread_pool.
        tp.add_task_by_value([&vect,i](){
                             vect[i] = i;
                             });
    }
    // Wait for all tasks which were requested by the main thread to complete.
    tp.wait_for_all_tasks();
    for (unsigned long i = 0; i < vect.size(); ++i)
    {
        dlog << LINFO << "vect["<<i<<"]: " << vect[i];
    }
#endif



    /* A possible run of this program might produce the following output (the first column is 
       the time the log message occurred and the value in [] is the thread id for the thread
       that generated the log message):

    1 INFO  [0] main: schedule a few tasks
    1 INFO  [1] main: task start
    1 INFO  [0] main: result = 10
  201 INFO  [2] main: subtask end 
  201 INFO  [1] main: var = 2
  201 INFO  [2] main: A message from a lambda function running in another thread.
  301 INFO  [3] main: subtask2 end 
  301 INFO  [1] main: task end
  301 INFO  [0] main: vect[0]: 0
  301 INFO  [0] main: vect[1]: 1
  301 INFO  [0] main: vect[2]: 2
  301 INFO  [0] main: vect[3]: 3
  301 INFO  [0] main: vect[4]: 4
  301 INFO  [0] main: vect[5]: 5
  301 INFO  [0] main: vect[6]: 6
  301 INFO  [0] main: vect[7]: 7
  301 INFO  [0] main: vect[8]: 8
  301 INFO  [0] main: vect[9]: 9
    */
}
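The add_value task object passed to add_task_by_value() in main() above is not shown in this excerpt. A minimal sketch consistent with the comment that it "adds 7 to num" (the exact member names are assumptions):

class add_value
{
public:
    explicit add_value(int value_to_add) : val(value_to_add) {}

    // Called by the thread pool with the int wrapped by the future<int> argument.
    void operator()(int& num) { num += val; }

private:
    int val;
};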
Example #12
 void thread_data::operator delete(void *p, thread_pool& pool)
 {
     if (0 != p)
         pool.deallocate(reinterpret_cast<thread_data*>(p));
 }
Example #13
 void thread_data::operator delete(void *p, thread_pool& pool)
 {
     if (0 != p)
         pool.deallocate(static_cast<thread_data*>(p));
 }
Example #14
void file_manager::safe_add_visited_dir(path const & dir,
                                        thread_pool & thread_pool,
                                        suffix_array_builder & sa_builder)
{
    boost::lock_guard<boost::mutex> locker(lock_);
    boost::shared_future<void> dir_future = thread_pool.add_task(
        boost::bind(&file_manager::get_files_from_dir, this, dir,
                    boost::ref(thread_pool), boost::ref(sa_builder)));
    remaining_dirs_.push(dir_future);
}
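Each queued directory scan is tracked by the shared_future pushed above. A hypothetical companion (not from the original source; remaining_dirs_ is assumed to be a std::queue<boost::shared_future<void>> guarded by lock_) could drain that queue so a caller blocks until every discovered directory has been scanned, including directories queued while waiting:

void file_manager::wait_for_visited_dirs()
{
    for (;;)
    {
        boost::shared_future<void> next;
        {
            boost::lock_guard<boost::mutex> locker(lock_);
            if (remaining_dirs_.empty())
                return;
            next = remaining_dirs_.front();
            remaining_dirs_.pop();
        }
        next.wait();  // wait outside the lock so new directories can still be queued
    }
}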
Example #15
 inline void swap(thread_pool & l, thread_pool & r) {
   l.swap(r);
 }
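A brief hypothetical usage note (the constructor arguments are assumed for illustration): placing this free swap() in the same namespace as thread_pool lets unqualified calls find it through argument-dependent lookup, so generic code written against the usual "using std::swap" idiom dispatches to the cheap member swap.

    thread_pool a(4), b(8);   // pool sizes are assumed
    using std::swap;
    swap(a, b);               // ADL selects the thread_pool overload above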