Example #1
0
  // Worker-thread job: run one analysis/optimization pass over `scope`
  // (via visitor->visitScope) and then walk the dependency graph, waking,
  // re-enqueueing, or force-rerunning scopes affected by the changes made.
  // Any Exception thrown by the pass is logged and swallowed so the worker
  // thread keeps servicing jobs.
  void doJob(BlockScope *scope) override {
    try {
      // m_context is really the visitor driving this phase.
      auto visitor =
        (DepthFirstVisitor<When, OptVisitor>*) m_context;
      {
        // Lock order is always s_depsMutex then s_jobStateMutex -- the same
        // pair, in the same order, as the post-visit critical section below.
        Lock ldep(BlockScope::s_depsMutex);
        Lock lstate(BlockScope::s_jobStateMutex);
        always_assert(scope->getMark() == BlockScope::MarkReady);
        if (scope->getNumDepsToWaitFor()) {
          // Dependencies appeared since this scope was enqueued: park it.
          // A finishing dependency will move it back to Ready and re-enqueue.
          scope->setMark(BlockScope::MarkWaiting);
          return;
        }
        scope->setMark(BlockScope::MarkProcessing);
      }

      // Reset before the pass runs; other threads may set this again while
      // we are Processing (see the MarkProcessing cases below).
      scope->setForceRerun(false);

      // creates on demand
      AnalysisResult::s_changedScopesMapThreadLocal->clear();
      // Run the pass. The result is a bitmask of "use kinds" describing the
      // kinds of changes this scope made.
      int useKinds = visitor->visitScope(BlockScopeRawPtr(scope));
      assert(useKinds >= 0);

      {
        Lock l2(BlockScope::s_depsMutex);
        Lock l1(BlockScope::s_jobStateMutex);

        assert(scope->getMark() == BlockScope::MarkProcessing);
        assert(scope->getNumDepsToWaitFor() == 0);

        // re-enqueue changed scopes, regardless of rescheduling exception.
        // this is because we might have made changes to other scopes which we
        // do not undo, so we need to announce their updates
        for (const auto& local : *AnalysisResult::s_changedScopesMapThreadLocal) {
          // local.first = a scope we mutated during the pass,
          // local.second = the change-kind bits we applied to it.
          for (const auto& pf : local.first->getOrderedUsers()) {
            // Only notify a user interested in this phase AND in the kinds
            // of changes we actually made.
            if ((pf->second & GetPhaseInterestMask<When>()) &&
                (pf->second & local.second)) {
              int m = pf->first->getMark();
              switch (m) {
              case BlockScope::MarkWaiting:
              case BlockScope::MarkReady:
                // Will (re)run anyway; nothing to do.
                ; // no-op
                break;
              case BlockScope::MarkProcessing:
                // Running right now and may have missed the update; make it
                // run again after it finishes.
                pf->first->setForceRerun(true);
                break;
              case BlockScope::MarkProcessed:
                // Already finished this phase; reactivate it and, if it has
                // no outstanding deps, put it straight back on the queue.
                if (visitor->activateScope(pf->first)) {
                  visitor->enqueue(pf->first);
                }
                break;
              default: assert(false);
              }
            }
          }
        }
        AnalysisResult::s_changedScopesMapThreadLocal.destroy();

        // Fold in change kinds carried over from an earlier run of this
        // scope that had to be rescheduled.
        useKinds |= scope->rescheduleFlags();
        scope->setRescheduleFlags(0);

        // Notify this scope's direct users and release their dep counts.
        for (const auto& pf : scope->getOrderedUsers()) {
          if (pf->second & GetPhaseInterestMask<When>()) {
            int m = pf->first->getMark();
            if (pf->second & useKinds && m == BlockScope::MarkProcessed) {
              // The user already ran but cares about what we changed:
              // reactivate it. It cannot be ready -- it still has to wait
              // at least for us -- so treat it as Waiting below.
              bool ready = visitor->activateScope(pf->first);
              always_assert(!ready);
              m = BlockScope::MarkWaiting;
            }

            if (m == BlockScope::MarkWaiting || m == BlockScope::MarkReady) {
              int nd = pf->first->getNumDepsToWaitFor();
              always_assert(nd >= 1);
              // We were one of its pending dependencies; if the decrement
              // leaves none outstanding and it is parked, promote it to
              // Ready and enqueue it.
              if (!pf->first->decNumDepsToWaitFor() &&
                  m == BlockScope::MarkWaiting) {
                pf->first->setMark(BlockScope::MarkReady);
                visitor->enqueue(pf->first);
              }
            } else if (pf->second & useKinds &&
                       m == BlockScope::MarkProcessing) {
              // This is conservative: If we have a user who is currently
              // processing (yes, this can potentially happen if we add a
              // user *after* the initial dep graph has been formed), then we
              // have no guarantee that the scope read this scope's updates
              // in its entirety. Thus, we must force it to run again in
              // order to be able to observe all the updates.
              always_assert(pf->first->getNumDepsToWaitFor() == 0);
              pf->first->setForceRerun(true);
            }
          }
        }
        scope->setMark(BlockScope::MarkProcessed);
        if (scope->forceRerun()) {
          // Something we depend on changed while we were running:
          // schedule ourselves to run again.
          if (visitor->activateScope(BlockScopeRawPtr(scope))) {
            visitor->enqueue(BlockScopeRawPtr(scope));
          }
        } else {
          // If any dependency is still mid-run we may not have observed all
          // of its updates, so reactivate ourselves to be run again once it
          // completes. It cannot report ready: that dep is still in flight.
          // NOTE(review): assumes activateScope re-registers this scope to
          // wait on its unfinished deps -- confirm against its definition.
          for (const auto& p : scope->getDeps()) {
            if (*p.second & GetPhaseInterestMask<When>()) {
              if (p.first->getMark() == BlockScope::MarkProcessing) {
                bool ready = visitor->activateScope(BlockScopeRawPtr(scope));
                always_assert(!ready);
                break;
              }
            }
          }
        }
      }
    } catch (Exception &e) {
      // Log and swallow: a failing pass must not kill the worker thread.
      Logger::Error("%s", e.getMessage().c_str());
    }
  }
Example #2
0
  // Worker-thread job: run one analysis/optimization pass over `scope` and
  // then walk the dependency graph, waking, re-enqueueing, or force-rerunning
  // scopes affected by the changes made. This variant additionally
  // (a) maintains HPHP_INSTRUMENT_PROCESS_PARALLEL counters and
  // (b) supports "needs reschedule": a pass that bailed out recoverably
  // (e.g. lock contention) is simply re-enqueued without notifying users.
  // Any Exception thrown by the pass is logged and swallowed so the worker
  // thread keeps servicing jobs.
  virtual void doJob(BlockScope *scope) {
#ifdef HPHP_INSTRUMENT_PROCESS_PARALLEL
    // Instrumentation: total doJob invocations plus a per-scope call count.
    ++AnalysisResult::s_NumDoJobCalls;
    ConcurrentBlockScopeRawPtrIntHashMap::accessor acc;
    AnalysisResult::s_DoJobUniqueScopes.insert(acc,
      BlockScopeRawPtr(scope));
    acc->second += 1;
#endif /* HPHP_INSTRUMENT_PROCESS_PARALLEL */
    try {
      // m_context is really the visitor driving this phase.
      auto visitor =
        (DepthFirstVisitor<When, OptVisitor>*) m_context;
      {
        // Lock order is always s_depsMutex then s_jobStateMutex -- the same
        // pair, in the same order, as the post-visit critical section below.
        Lock ldep(BlockScope::s_depsMutex);
        Lock lstate(BlockScope::s_jobStateMutex);
        always_assert(scope->getMark() == BlockScope::MarkReady);
        if (scope->getNumDepsToWaitFor()) {
          // Dependencies appeared since this scope was enqueued: park it.
          // A finishing dependency will move it back to Ready and re-enqueue.
          scope->setMark(BlockScope::MarkWaiting);
          return;
        }
        scope->setMark(BlockScope::MarkProcessing);
      }

      // Reset both flags before the pass runs; forceRerun may be set again
      // by other threads while we are Processing, and needsReschedule may be
      // set by the pass itself if it has to bail out.
      scope->setForceRerun(false);
      scope->setNeedsReschedule(false);

      // creates on demand
      AnalysisResult::s_changedScopesMapThreadLocal->clear();
      // Run the pass. The result is a bitmask of "use kinds" describing the
      // kinds of changes this scope made.
      int useKinds = visitor->visitScope(BlockScopeRawPtr(scope));
      assert(useKinds >= 0);

      {
        Lock l2(BlockScope::s_depsMutex);
        Lock l1(BlockScope::s_jobStateMutex);

        assert(scope->getMark() == BlockScope::MarkProcessing);
        assert(scope->getNumDepsToWaitFor() == 0);
        scope->assertNumDepsSanity();

        // re-enqueue changed scopes, regardless of rescheduling exception.
        // this is because we might have made changes to other scopes which we
        // do not undo, so we need to announce their updates
        BlockScopeRawPtrFlagsHashMap::const_iterator localIt =
          AnalysisResult::s_changedScopesMapThreadLocal->begin();
        BlockScopeRawPtrFlagsHashMap::const_iterator localEnd =
          AnalysisResult::s_changedScopesMapThreadLocal->end();
        for (; localIt != localEnd; ++localIt) {
          // localIt->first = a scope we mutated during the pass,
          // localIt->second = the change-kind bits we applied to it.
          const BlockScopeRawPtrFlagsVec &ordered =
            localIt->first->getOrderedUsers();
          for (BlockScopeRawPtrFlagsVec::const_iterator userIt =
                 ordered.begin(), userEnd = ordered.end();
               userIt != userEnd; ++userIt) {
            BlockScopeRawPtrFlagsVec::value_type pf = *userIt;
            // Only notify a user interested in this phase AND in the kinds
            // of changes we actually made.
            if ((pf->second & GetPhaseInterestMask<When>()) &&
                (pf->second & localIt->second)) {
              int m = pf->first->getMark();
              switch (m) {
              case BlockScope::MarkWaiting:
              case BlockScope::MarkReady:
                // Will (re)run anyway; nothing to do.
                ; // no-op
                break;
              case BlockScope::MarkProcessing:
                // Running right now and may have missed the update; make it
                // run again after it finishes.
#ifdef HPHP_INSTRUMENT_PROCESS_PARALLEL
                ++AnalysisResult::s_NumForceRerunGlobal;
#endif /* HPHP_INSTRUMENT_PROCESS_PARALLEL */
                pf->first->setForceRerun(true);
                break;
              case BlockScope::MarkProcessed:
                // Already finished this phase; reactivate it and, if it has
                // no outstanding deps, put it straight back on the queue.
#ifdef HPHP_INSTRUMENT_PROCESS_PARALLEL
                ++AnalysisResult::s_NumReactivateGlobal;
#endif /* HPHP_INSTRUMENT_PROCESS_PARALLEL */
                if (visitor->activateScope(pf->first)) {
                  visitor->enqueue(pf->first);
                }
                break;
              default: assert(false);
              }
            }
          }
        }
        AnalysisResult::s_changedScopesMapThreadLocal.destroy();

        if (scope->needsReschedule()) {
          // This signals an error in visitScope() which the scope can possibly
          // recover from if run again. an example is a lock contention error
          // (where the scope had to bail out). thus, we simply want to
          // re-enqueue it (w/o activating dependents, since this scope hasn't
          // actually finished running)
          // The useKinds produced so far are preserved in rescheduleFlags so
          // the retry announces them.
          scope->setRescheduleFlags(
              scope->rescheduleFlags() | useKinds);
          if (visitor->activateScope(BlockScopeRawPtr(scope))) {
            visitor->enqueue(BlockScopeRawPtr(scope));
          }
        } else {
          // Normal completion: fold in change kinds carried over from an
          // earlier rescheduled run, then notify this scope's direct users
          // and release their dep counts.
          useKinds |= scope->rescheduleFlags();
          scope->setRescheduleFlags(0);

          const BlockScopeRawPtrFlagsVec &ordered = scope->getOrderedUsers();
          for (BlockScopeRawPtrFlagsVec::const_iterator it = ordered.begin(),
               end = ordered.end(); it != end; ++it) {
            BlockScopeRawPtrFlagsVec::value_type pf = *it;
            if (pf->second & GetPhaseInterestMask<When>()) {
              int m = pf->first->getMark();
              if (pf->second & useKinds && m == BlockScope::MarkProcessed) {
                // The user already ran but cares about what we changed:
                // reactivate it. It cannot be ready -- it still has to wait
                // at least for us -- so treat it as Waiting below.
#ifdef HPHP_INSTRUMENT_PROCESS_PARALLEL
                ++AnalysisResult::s_NumReactivateUseKinds;
#endif /* HPHP_INSTRUMENT_PROCESS_PARALLEL */
                bool ready = visitor->activateScope(pf->first);
                always_assert(!ready);
                m = BlockScope::MarkWaiting;
              }

              if (m == BlockScope::MarkWaiting || m == BlockScope::MarkReady) {
                int nd = pf->first->getNumDepsToWaitFor();
                always_assert(nd >= 1);
                // We were one of its pending dependencies; if the decrement
                // leaves none outstanding and it is parked, promote it to
                // Ready and enqueue it.
                if (!pf->first->decNumDepsToWaitFor() &&
                    m == BlockScope::MarkWaiting) {
                  pf->first->setMark(BlockScope::MarkReady);
                  visitor->enqueue(pf->first);
                }
              } else if (pf->second & useKinds &&
                         m == BlockScope::MarkProcessing) {
                // This is conservative: If we have a user who is currently
                // processing (yes, this can potentially happen if we add a
                // user *after* the initial dep graph has been formed), then we
                // have no guarantee that the scope read this scope's updates
                // in its entirety. Thus, we must force it to run again in
                // order to be able to observe all the updates.
#ifdef HPHP_INSTRUMENT_PROCESS_PARALLEL
                ++AnalysisResult::s_NumForceRerunUseKinds;
#endif /* HPHP_INSTRUMENT_PROCESS_PARALLEL */
                always_assert(pf->first->getNumDepsToWaitFor() == 0);
                pf->first->setForceRerun(true);
              }
            }
          }
          scope->setMark(BlockScope::MarkProcessed);
          if (scope->forceRerun()) {
            // Something we depend on changed while we were running:
            // schedule ourselves to run again.
            if (visitor->activateScope(BlockScopeRawPtr(scope))) {
              visitor->enqueue(BlockScopeRawPtr(scope));
            }
          } else {
            // If any dependency is still mid-run we may not have observed all
            // of its updates, so reactivate ourselves to be run again once it
            // completes. It cannot report ready: that dep is still in flight.
            // NOTE(review): assumes activateScope re-registers this scope to
            // wait on its unfinished deps -- confirm against its definition.
            const BlockScopeRawPtrFlagsPtrVec &deps = scope->getDeps();
            for (BlockScopeRawPtrFlagsPtrVec::const_iterator it = deps.begin(),
                 end = deps.end(); it != end; ++it) {
              const BlockScopeRawPtrFlagsPtrPair &p(*it);
              if (*p.second & GetPhaseInterestMask<When>()) {
                if (p.first->getMark() == BlockScope::MarkProcessing) {
                  bool ready = visitor->activateScope(BlockScopeRawPtr(scope));
                  always_assert(!ready);
                  break;
                }
              }
            }
          }
        }
      }
    } catch (Exception &e) {
      // Log and swallow: a failing pass must not kill the worker thread.
      Logger::Error("%s", e.getMessage().c_str());
    }
  }