// A worker thread is normally joined before it is destroyed; joining here
// is a last-resort fallback and is therefore logged as a warning.
worker_thread::~worker_thread( )
{
  auto& internals = get_internals( );
  if ( internals.thr.joinable( ) ) {
    LOG( ) << "WARNING: Joining worker thread in its destructor.";
    internals.thr.join( );
  }
  // The thread's loop flags itself as stopped on exit; by now it must have.
  assert( get_internals( ).stopped );
}
// Example #2
// 0
/* Tears down a loaded plugin.  Order matters: detach it from the hub and
 * drop its callbacks before the shared object is closed, then release the
 * handle memory last. */
void plugin_unload(struct plugin_handle* plugin)
{
	struct plugin_hub_internals* state = get_internals(plugin);

	state->unregister(plugin);
	plugin_unregister_callback_functions(plugin);
	plugin_close(plugin->handle);
	hub_free(plugin);
}
// Shuts the pool down: marks the shared work data as shutting down, wakes
// every live worker so it can observe the flag, then joins and removes all
// worker threads.
global_thread_pool::~global_thread_pool( )
{
  boost::unique_lock< boost::mutex > l( threads_mt );
  // Workers poll this flag; once set they finish their current pass and stop.
  work_data.shutting_down = true;
  // First pass: reap threads that have already stopped; wake the rest so
  // they notice shutting_down.
  for ( auto it = threads.begin( ); it != threads.end( ); ) {
    if ( it->get_internals( ).stopped ) {
      it->get_internals( ).thr.join( );
      it = threads.erase( it );  // erase yields the next valid iterator
      continue;
    }
    it->wakeup( );
    ++it;
  }
  // Second pass: join whatever remains.  The mutex is released around
  // join() so an exiting worker that needs threads_mt cannot deadlock with
  // us.  NOTE(review): the reference taken before unlock stays valid only
  // if nothing else mutates `threads` while the lock is dropped — confirm
  // the destructor is the sole mutator during shutdown.
  while ( threads.size( ) ) {
    auto& thr = threads.back( );
    l.unlock( );
    thr.get_internals( ).thr.join( );
    l.lock( );
    threads.pop_back( );
  }
  // Every worker's loop has exited and decremented these counters by now.
  assert( work_data.working_threads == 0 );
  assert( work_data.number_threads == 0 );
}
// Main entry point of a worker thread.
//
// Registers the thread in the pool counters, installs the thread-local
// master coroutine, then alternates between running queued work (do_work)
// and dispatching completed I/O (handle_io_operation), blocking on the
// pool's I/O completion port in between.  The loop ends when the port wait
// fails for a reason other than an APC interruption — including the
// immediate WAIT_TIMEOUT produced by the zero timeout used during
// shutdown.
void
worker_thread_impl::loop( )
{
  ++data.number_threads;
  try {
    this_wthread = (thr_queue::worker_thread*) ( this );
    coroutine master_cor;
    global_thread_pool::after_yield_f after_yield_f;
    master_coroutine = &master_cor;
    after_yield      = &after_yield_f;

    // BUGFIX: olapped_entry used to be read uninitialized by the first
    // pass of the do/while below (wait_cond only fills it at the *end* of
    // an iteration).  Zero-initialize it and tag it with the queue
    // completion key so the first iteration performs a regular do_work()
    // pass — draining any work queued before this thread came up —
    // instead of branching on indeterminate data.
    OVERLAPPED_ENTRY olapped_entry{};
    olapped_entry.lpCompletionKey = data.queue_completionkey;

    // Blocks (alertably) until a completion packet arrives.  Returns true
    // when olapped_entry holds a fresh packet, false when the loop should
    // terminate.
    auto wait_cond = [&] {
      while ( true ) {
        // During shutdown only poll, so the thread can exit promptly.
        auto wait_time = data.shutting_down ? 0 : INFINITE;
        ULONG removed_entries;
        auto wait_iocp =
          GetQueuedCompletionStatusEx( data.iocp, &olapped_entry, 1, &removed_entries, wait_time, true );
        if ( !wait_iocp ) {
          auto err = GetLastError( );
          if ( err == WAIT_IO_COMPLETION ) {
            // Alertable wait interrupted by an APC (see wakeup()); retry.
            continue;
          } else {
            if ( err != WAIT_TIMEOUT ) {
              LOG( ) << "GetQueuedCompletionStatus: " << err;
            }
            return false;
          }
        } else {
          break;
        }
      }
      return true;
    };

    do {
      // Packets tagged with the queue completion key mean "work in the
      // coroutine queues"; anything else is a finished I/O operation.
      if ( olapped_entry.lpCompletionKey != data.queue_completionkey ) {
        handle_io_operation( olapped_entry );
      } else {
        do_work( );
      }
    } while ( wait_cond( ) );
  } catch ( const std::exception& e ) {
    LOG( ) << "caught when worker thread was stopping: " << e.what( );
  }
  --data.number_threads;
  get_internals( ).stopped = true;
}
// Example #5
// 0
// Used internally only
struct hub_info* plugin_get_hub(struct plugin_handle* plugin)
{
	struct plugin_hub_internals* data = get_internals(plugin);
	return data->hub;
}
// Interrupts this worker's alertable IOCP wait by queueing a no-op APC to
// its thread: GetQueuedCompletionStatusEx then returns with
// WAIT_IO_COMPLETION and the worker re-evaluates its state (see loop()).
void
worker_thread_impl::wakeup( )
{
  auto noop_apc = []( ULONG_PTR ) {};
  QueueUserAPC( noop_apc, get_internals( ).thr.native_handle( ), 0 );
}
// Example #7
// 0
static struct plugin_callback_data* get_callback_data(struct plugin_handle* plugin)
{
	return get_internals(plugin)->callback_data;
}
// Drains coroutine work available to this thread and runs each dequeued
// coroutine, looping until a full pass over the queues finds nothing.
//
// Source order per pass: (1) a coroutine handed directly to this thread
// via run_next, (2) this thread's private queue, (3) the global priority
// queue, (4) the global normal queue (re-checking priority work while
// draining it).  The `do_work:` label is the shared "run one coroutine"
// tail for all four sources.
void
generic_worker_thread::do_work( )
{
  bool could_work;
  int number_units_of_work   = 0;
  // Set once a coroutine had to be handed back to the pool because this
  // thread may not run it; afterwards only the thread-local queue is
  // consulted in this call.
  bool only_run_thread_queue = false;
  do {
    could_work = false;

    coroutine work_to_do;

    // Source 1: a coroutine explicitly scheduled to run next on this thread.
    if ( run_next ) {
      work_to_do = std::move( run_next.get( ) );
      run_next   = boost::none;
      goto do_work;
    }

    // Source 2: this thread's private queue.  The size counter can run
    // ahead of the queue's visible contents, so spin until the dequeue
    // succeeds (or the counter drops to zero).
    while ( get_internals( ).thread_queue_size > 0 ) {
      if ( get_internals( ).thread_queue.try_dequeue( work_to_do ) ) {
        --get_internals( ).thread_queue_size;
        goto do_work;
      }
    }

    // Source 3: the global priority queue (own producer token first).
    while ( get_data( ).work_queue_prio_size > 0 && !only_run_thread_queue ) {
      if ( get_data( ).work_queue_prio.try_dequeue_from_producer( internals->ptok_prio, work_to_do ) ||
           get_data( ).work_queue_prio.try_dequeue( work_to_do ) ) {
        --get_data( ).work_queue_prio_size;
        goto do_work;
      }
    }

    // Source 4: the global normal queue, while still preferring any
    // priority work that arrives as we drain it.
    while ( get_data( ).work_queue_size > 0 && !only_run_thread_queue ) {
      if ( get_data( ).work_queue.try_dequeue_from_producer( internals->ptok, work_to_do ) ||
           get_data( ).work_queue.try_dequeue( work_to_do ) ) {
        --get_data( ).work_queue_size;
        goto do_work;
      }
      if ( get_data( ).work_queue_prio.try_dequeue_from_producer( internals->ptok_prio, work_to_do ) ||
           get_data( ).work_queue_prio.try_dequeue( work_to_do ) ) {
        --get_data( ).work_queue_prio_size;
        goto do_work;
      }
    }

    // Nothing found anywhere: leave the loop.
    could_work = false;
    break;

  do_work:
    // Common tail: run the coroutine now held in work_to_do.
    ++get_data( ).working_threads;
    BOOST_SCOPE_EXIT_ALL( & )
    {
      --get_data( ).working_threads;
    };
    if ( !work_to_do.can_be_run_by_thread( this_wthread ) ) {
      // we reschedule it and hope it is run by a different thread.
      // NOTE(review): `continue` re-tests the do/while condition, and
      // could_work is still false here, so this path actually *exits* the
      // loop right after rescheduling — which would make
      // only_run_thread_queue dead.  Confirm whether that is intended.
      LOG( ) << "Rescheduling cor: " << work_to_do.get_id( ) << " thr id: " << boost::this_thread::get_id( );
      global_thr_pool.schedule( std::move( work_to_do ), true );
      only_run_thread_queue = true;
      continue;
    }
    ++number_units_of_work;
    could_work        = true;
    running_coroutine = &work_to_do;

    // Clear any thread restriction before handing control over.
    work_to_do.set_forbidden_thread( nullptr );

    // Transfer control to the coroutine; returns when it yields or finishes.
    work_to_do.switch_to_from( *master_coroutine );
    assert( running_coroutine == &work_to_do );
    // A yield may have installed a continuation that takes ownership of
    // the coroutine object; run and clear it.
    if ( *after_yield ) {
      ( *after_yield )( std::move( work_to_do ) );
      *after_yield = nullptr;
    }
    running_coroutine = master_coroutine;

    // if we think that other threads are waiting apart
    // from this one, we wake them up.
    global_thr_pool.plat_wakeup_threads( );
  } while ( could_work );
  LOG( ) << "Thread " << boost::this_thread::get_id( ) << " performed " << number_units_of_work;
}