Example #1
static void do_parallel_map(Stopwatch& sw) {
  // Create a bunch of subcontexts from here and put them in a map
  auto all = create();
  ::ContextMap<size_t> mp;
  for (size_t i = 0; i < all.size(); i++)
    mp.Add(i, all[i]);

  // Now spin up the threads that will contend with our enumeration:
  auto proceed = std::make_shared<bool>(true);
  for (size_t i = N; i--;)
    std::thread([proceed, mp] {
      while (*proceed)
        for (const auto& cur : mp)
          ;
    }).detach();
  auto cleanup = MakeAtExit([&] { *proceed = false; });

  std::this_thread::sleep_for(std::chrono::milliseconds(100));

  // Enumerate the map while iteration is underway
  sw.Start();
  for (auto& cur : mp)
    ;
  sw.Stop(n);
}
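
Every one of these examples relies on MakeAtExit to run cleanup code when the enclosing scope unwinds. The snippet below is a minimal, hypothetical sketch of that scope-guard idiom, assuming only that the returned object invokes its stored callable from its destructor; AtExitGuard and MakeAtExitSketch are illustrative names, not Autowiring's actual implementation.

#include <type_traits>
#include <utility>

// Hypothetical scope guard: stores a callable and invokes it when the
// guard is destroyed, i.e. when the enclosing scope exits.
template<class Fn>
class AtExitGuard {
public:
  explicit AtExitGuard(Fn fn) : m_fn(std::move(fn)), m_armed(true) {}
  AtExitGuard(AtExitGuard&& rhs) : m_fn(std::move(rhs.m_fn)), m_armed(rhs.m_armed) {
    rhs.m_armed = false;
  }
  AtExitGuard(const AtExitGuard&) = delete;
  AtExitGuard& operator=(const AtExitGuard&) = delete;

  ~AtExitGuard(void) {
    if (m_armed)
      m_fn();
  }

private:
  Fn m_fn;
  bool m_armed;
};

template<class Fn>
AtExitGuard<typename std::decay<Fn>::type> MakeAtExitSketch(Fn&& fn) {
  return AtExitGuard<typename std::decay<Fn>::type>(std::forward<Fn>(fn));
}

// Usage, mirroring the examples:
//   auto cleanup = MakeAtExitSketch([&] { *proceed = false; });
//   ... work ...
//   // the lambda runs here, when cleanup goes out of scope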
Example #2
TEST_F(CoreContextTest, AppropriateShutdownInterleave) {
  // Need both an outer and an inner context
  AutoCurrentContext ctxtOuter;
  AutoCreateContext ctxtInner;

  // Need to inject types at both scopes
  AutoRequired<ExplicitlyHoldsOutstandingCount> outer(ctxtOuter);
  AutoRequired<ExplicitlyHoldsOutstandingCount> inner(ctxtInner);

  // Start both contexts up
  ctxtOuter->Initiate();
  ctxtInner->Initiate();

  // Now shut down the outer context.  Hand this off to another thread, because we expect the call to block.
  std::thread holder{
    [ctxtOuter] {
      ctxtOuter->SignalShutdown(true);
    }
  };
  auto holderClean = MakeAtExit([&holder] { holder.join(); });

  // Need to ensure that both outstanding counters are reset at some point:
  {
    auto cleanup = MakeAtExit([&] {
      outer->Proceed();
      inner->Proceed();
    });

    // Outer entry should have called "stop":
    auto future = outer->calledStop.get_future();
    ASSERT_EQ(
      std::future_status::ready,
      future.wait_for(std::chrono::seconds(5))
    ) << "Outer scope's OnStop method was incorrectly blocked by a child context member taking a long time to shut down";
  }

  // Both contexts should be stopped now:
  ASSERT_TRUE(ctxtOuter->Wait(std::chrono::seconds(5))) << "Outer context did not tear down in a timely fashion";
  ASSERT_TRUE(ctxtOuter->IsQuiescent()) << "Quiescence not achieved by outer context after shutdown";
  ASSERT_TRUE(ctxtInner->Wait(std::chrono::seconds(5))) << "Inner context did not tear down in a timely fashion";
}
Example #3
Benchmark PriorityBoost::CanBoostPriority(void) {
  AutoCurrentContext ctxt;

  // Create two spinners and kick them off at the same time:
  AutoRequired<JustIncrementsANumber<ThreadPriority::BelowNormal>> lower;
  AutoRequired<JustIncrementsANumber<ThreadPriority::Normal>> higher;
  ctxt->Initiate();

#ifdef _MSC_VER
  // We want all of our threads to run on ONE CPU for a while, and then restore the original affinity at exit
  DWORD_PTR originalAffinity, systemAffinity;
  GetProcessAffinityMask(GetCurrentProcess(), &originalAffinity, &systemAffinity);
  SetProcessAffinityMask(GetCurrentProcess(), 1);
  auto onreturn = MakeAtExit([originalAffinity] {
    SetProcessAffinityMask(GetCurrentProcess(), originalAffinity);
  });
#else
  // TODO:  Implement on Unix so that this benchmark is trustworthy
#endif

  // Pound on the contended mutex a lot:
  AutoRequired<std::mutex> contended;
  for(size_t i = 100; i--;) {
    // We sleep while holding the contention lock to force waiting threads into the sleep queue.  The reason we have
    // to do this is due to the way that mutex is implemented under the hood.  The STL mutex uses a high-frequency
    // variable and attempts to perform a CAS (compare-and-swap) on this variable.  If it succeeds, the lock is
    // obtained; if it fails, it will put the thread into a non-ready state by calling WaitForSingleObject on Windows
    // or one of the mutex_lock methods on Unix.
    //
    // When a thread can't be run, it's moved from the OS's ready queue to the sleep queue.  The scheduler knows that
    // the thread can be moved back to the ready queue if a particular object is signalled, but in the case of a lock,
    // only one of the threads waiting on the object can actually be moved to the ready queue.  It's at THIS POINT
    // that the operating system consults the thread priority--if only one thread can be moved over, then the highest
    // priority thread will wind up in the ready queue every time.
    //
    // Thread priority does _not_ necessarily influence the amount of time the scheduler allocates to a ready thread
    // with respect to other threads of the same process.  This is why we hold the lock for a full millisecond: to
    // force the waiting threads over to the sleep queue and ensure that the priority resolution mechanism is
    // directly tested.  (A standalone sketch of this spin-then-wait pattern follows this example.)
    std::lock_guard<std::mutex> lk(*contended);
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }

  // Need to terminate before we try running a comparison.
  ctxt->SignalTerminate();

  return Benchmark {
    {"Low priority CPU time", std::chrono::nanoseconds{lower->val}},
    {"High priority CPU time", std::chrono::nanoseconds{higher->val}},
  };
}
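
The long comment in the loop above describes a contended lock as a two-phase affair: a fast compare-and-swap attempt, followed by a blocking wait that parks the thread in the sleep queue, where the scheduler's priority rules decide which waiter wakes first. The sketch below illustrates that spin-then-wait structure in isolation; SpinThenWaitLock and every identifier in it are hypothetical, and a real std::mutex is considerably more involved (on Windows it ultimately defers to kernel wait primitives, as the comment notes).

#include <atomic>
#include <condition_variable>
#include <mutex>

// Hypothetical illustration of the spin-then-wait pattern: try a CAS a few
// times, then park the thread until the holder signals release.
class SpinThenWaitLock {
public:
  void lock(void) {
    // Fast path: attempt to take the flag with a compare-and-swap.
    for (int i = 0; i < kSpinLimit; i++) {
      bool expected = false;
      if (m_held.compare_exchange_strong(expected, true))
        return;
    }

    // Slow path: block until the current holder releases.  This is the point
    // at which the waiter leaves the ready queue, and where the OS consults
    // thread priority when deciding which waiter to wake.
    std::unique_lock<std::mutex> lk(m_mutex);
    m_cv.wait(lk, [this] {
      bool expected = false;
      return m_held.compare_exchange_strong(expected, true);
    });
  }

  void unlock(void) {
    {
      // Publish the release under the same mutex the waiters use, so a
      // waiter cannot miss the wakeup.
      std::lock_guard<std::mutex> lk(m_mutex);
      m_held.store(false);
    }
    m_cv.notify_one();
  }

private:
  static const int kSpinLimit = 64;
  std::atomic<bool> m_held{false};
  std::mutex m_mutex;
  std::condition_variable m_cv;
};

Holding the benchmark's contended lock across a 1 ms sleep forces the other spinner out of the fast path and into the slow path shown here, which is exactly where the BelowNormal and Normal priorities get compared.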
Example #4
static void do_parallel_enum(Stopwatch& sw) {
  AutoCurrentContext ctxt;
  auto all = create();

  // Create threads which will cause contention:
  auto proceed = std::make_shared<bool>(true);
  for (size_t nParallel = N; nParallel--;) {
    std::thread([proceed, all, ctxt] {
      while (*proceed)
        for (auto cur : ContextEnumerator(ctxt))
          ;
    }).detach();
  }
  auto cleanup = MakeAtExit([&] { *proceed = false; });

  std::this_thread::sleep_for(std::chrono::milliseconds(100));

  // Perform parallel enumeration
  sw.Start();
  for (auto cur : ContextEnumerator(ctxt))
    ;
  sw.Stop(n);
}