Example #1
TEST_F(ThreadPoolTest, Async) {
  CHECK_UNSUPPORTED();
  ThreadPool Pool;
  std::atomic_int i{0};
  Pool.async([this, &i] {
    waitForMainThread();
    ++i;
  });
  Pool.async([&i] { ++i; });
  ASSERT_NE(2, i.load());
  setMainThreadReady();
  Pool.wait();
  ASSERT_EQ(2, i.load());
}
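The waitForMainThread() and setMainThreadReady() helpers used in this and the later tests belong to the ThreadPoolTest fixture and are not shown here. A minimal sketch of how such a gate could be implemented, assuming a plain mutex and condition variable (the class and member names below are illustrative, not the actual fixture):

#include <condition_variable>
#include <mutex>

// Hypothetical fixture helpers: queued tasks block in waitForMainThread()
// until the test body calls setMainThreadReady(), which lets each test
// assert on state both before and after the queued work runs.
class ThreadPoolTestGateSketch {
protected:
  void waitForMainThread() {
    std::unique_lock<std::mutex> Lock(WaitMainMutex);
    WaitMainCV.wait(Lock, [this] { return MainThreadReady; });
  }

  void setMainThreadReady() {
    {
      std::lock_guard<std::mutex> Lock(WaitMainMutex);
      MainThreadReady = true;
    }
    WaitMainCV.notify_all();
  }

private:
  std::condition_variable WaitMainCV;
  std::mutex WaitMainMutex;
  bool MainThreadReady = false;
};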
Example #2
TEST_F(ThreadPoolTest, AsyncBarrierArgs) {
  CHECK_UNSUPPORTED();
  // Test that async works with a function requiring multiple parameters.
  std::atomic_int checked_in{0};

  ThreadPool Pool;
  for (size_t i = 0; i < 5; ++i) {
    Pool.async(TestFunc, std::ref(checked_in), i);
  }
  Pool.wait();
  ASSERT_EQ(10, checked_in);
}
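TestFunc is defined elsewhere in the test file. For ASSERT_EQ(10, checked_in) to hold, it presumably adds its integer argument to the counter; a sketch of a definition consistent with that expectation (not the verbatim one):

#include <atomic>

// Hypothetical definition: the loop enqueues i = 0..4 and
// 0 + 1 + 2 + 3 + 4 = 10, matching the assertion above.
static void TestFunc(std::atomic_int &checked_in, size_t i) {
  checked_in += static_cast<int>(i);
}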
Example #3
TEST_F(ThreadPoolTest, PoolDestruction) {
  CHECK_UNSUPPORTED();
  // Test that the pool waits for all queued tasks when it is destroyed.
  std::atomic_int checked_in{0};
  {
    ThreadPool Pool;
    for (size_t i = 0; i < 5; ++i) {
      Pool.async([this, &checked_in, i] {
        waitForMainThread();
        ++checked_in;
      });
    }
    ASSERT_EQ(0, checked_in);
    setMainThreadReady();
  }
  ASSERT_EQ(5, checked_in);
}
Example #4
TEST_F(ThreadPoolTest, AsyncBarrier) {
  CHECK_UNSUPPORTED();
  // Test that async and the wait() barrier work together properly.

  std::atomic_int checked_in{0};

  ThreadPool Pool;
  for (size_t i = 0; i < 5; ++i) {
    Pool.async([this, &checked_in, i] {
      waitForMainThread();
      ++checked_in;
    });
  }
  ASSERT_EQ(0, checked_in);
  setMainThreadReady();
  Pool.wait();
  ASSERT_EQ(5, checked_in);
}
Example #5
int CodeCoverageTool::show(int argc, const char **argv,
                           CommandLineParserType commandLineParser) {

  cl::OptionCategory ViewCategory("Viewing options");

  cl::opt<bool> ShowLineExecutionCounts(
      "show-line-counts", cl::Optional,
      cl::desc("Show the execution counts for each line"), cl::init(true),
      cl::cat(ViewCategory));

  cl::opt<bool> ShowRegions(
      "show-regions", cl::Optional,
      cl::desc("Show the execution counts for each region"),
      cl::cat(ViewCategory));

  cl::opt<bool> ShowBestLineRegionsCounts(
      "show-line-counts-or-regions", cl::Optional,
      cl::desc("Show the execution counts for each line, or the execution "
               "counts for each region on lines that have multiple regions"),
      cl::cat(ViewCategory));

  cl::opt<bool> ShowExpansions("show-expansions", cl::Optional,
                               cl::desc("Show expanded source regions"),
                               cl::cat(ViewCategory));

  cl::opt<bool> ShowInstantiations("show-instantiations", cl::Optional,
                                   cl::desc("Show function instantiations"),
                                   cl::cat(ViewCategory));

  cl::opt<std::string> ShowOutputDirectory(
      "output-dir", cl::init(""),
      cl::desc("Directory in which coverage information is written out"));
  cl::alias ShowOutputDirectoryA("o", cl::desc("Alias for --output-dir"),
                                 cl::aliasopt(ShowOutputDirectory));

  cl::opt<uint32_t> TabSize(
      "tab-size", cl::init(2),
      cl::desc(
          "Set tab expansion size for html coverage reports (default = 2)"));

  cl::opt<std::string> ProjectTitle(
      "project-title", cl::Optional,
      cl::desc("Set project title for the coverage report"));

  auto Err = commandLineParser(argc, argv);
  if (Err)
    return Err;

  ViewOpts.ShowLineNumbers = true;
  ViewOpts.ShowLineStats = ShowLineExecutionCounts.getNumOccurrences() != 0 ||
                           !ShowRegions || ShowBestLineRegionsCounts;
  ViewOpts.ShowRegionMarkers = ShowRegions || ShowBestLineRegionsCounts;
  ViewOpts.ShowLineStatsOrRegionMarkers = ShowBestLineRegionsCounts;
  ViewOpts.ShowExpandedRegions = ShowExpansions;
  ViewOpts.ShowFunctionInstantiations = ShowInstantiations;
  ViewOpts.ShowOutputDirectory = ShowOutputDirectory;
  ViewOpts.TabSize = TabSize;
  ViewOpts.ProjectTitle = ProjectTitle;

  if (ViewOpts.hasOutputDirectory()) {
    if (auto E = sys::fs::create_directories(ViewOpts.ShowOutputDirectory)) {
      error("Could not create output directory!", E.message());
      return 1;
    }
  }

  sys::fs::file_status Status;
  if (sys::fs::status(PGOFilename, Status)) {
    error("profdata file error: can not get the file status. \n");
    return 1;
  }

  auto ModifiedTime = Status.getLastModificationTime();
  std::string ModifiedTimeStr = to_string(ModifiedTime);
  size_t found = ModifiedTimeStr.rfind(":");
  ViewOpts.CreatedTimeStr = (found != std::string::npos)
                                ? "Created: " + ModifiedTimeStr.substr(0, found)
                                : "Created: " + ModifiedTimeStr;

  auto Coverage = load();
  if (!Coverage)
    return 1;

  auto Printer = CoveragePrinter::create(ViewOpts);

  if (!Filters.empty()) {
    auto OSOrErr = Printer->createViewFile("functions", /*InToplevel=*/true);
    if (Error E = OSOrErr.takeError()) {
      error("Could not create view file!", toString(std::move(E)));
      return 1;
    }
    auto OS = std::move(OSOrErr.get());

    // Show functions.
    for (const auto &Function : Coverage->getCoveredFunctions()) {
      if (!Filters.matches(Function))
        continue;

      auto mainView = createFunctionView(Function, *Coverage);
      if (!mainView) {
        warning("Could not read coverage for '" + Function.Name + "'.");
        continue;
      }

      mainView->print(*OS.get(), /*WholeFile=*/false, /*ShowSourceName=*/true);
    }

    Printer->closeViewFile(std::move(OS));
    return 0;
  }

  // Show files
  bool ShowFilenames =
      (SourceFiles.size() != 1) || ViewOpts.hasOutputDirectory() ||
      (ViewOpts.Format == CoverageViewOptions::OutputFormat::HTML);

  if (SourceFiles.empty())
    // Get the source files from the function coverage mapping.
    for (StringRef Filename : Coverage->getUniqueSourceFiles())
      SourceFiles.push_back(Filename);

  // Create an index out of the source files.
  if (ViewOpts.hasOutputDirectory()) {
    if (Error E = Printer->createIndexFile(SourceFiles, *Coverage)) {
      error("Could not create index file!", toString(std::move(E)));
      return 1;
    }
  }

  // FIXME: Sink the hardware_concurrency() == 1 check into ThreadPool.
  if (!ViewOpts.hasOutputDirectory() ||
      std::thread::hardware_concurrency() == 1) {
    for (const std::string &SourceFile : SourceFiles)
      writeSourceFileView(SourceFile, Coverage.get(), Printer.get(),
                          ShowFilenames);
  } else {
    // In -output-dir mode, it's safe to use multiple threads to print files.
    ThreadPool Pool;
    for (const std::string &SourceFile : SourceFiles)
      Pool.async(&CodeCoverageTool::writeSourceFileView, this, SourceFile,
                 Coverage.get(), Printer.get(), ShowFilenames);
    Pool.wait();
  }

  return 0;
}
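The parallel branch above queues a pointer to member function together with its arguments (&CodeCoverageTool::writeSourceFileView, this, ...). A standalone sketch of that calling pattern, using an illustrative Widget type that stands in for llvm-cov's tool class:

#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
#include <vector>

// Illustrative only: a member function is queued with async() by passing the
// pointer-to-member, the object, and the remaining arguments, mirroring the
// writeSourceFileView calls above. Each task receives its own copy of Name.
struct Widget {
  void emit(const std::string &Name, bool Verbose) {
    // Write output for Name; Verbose is just an extra argument showing the
    // argument-forwarding form of async().
    llvm::outs() << Name << (Verbose ? " (verbose)\n" : "\n");
  }

  void emitAll(const std::vector<std::string> &Names) {
    llvm::ThreadPool Pool;
    for (const std::string &Name : Names)
      Pool.async(&Widget::emit, this, Name, /*Verbose=*/true);
    Pool.wait();
  }
};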
Example #6
// Main entry point for ThinLTO processing.
void ThinLTOCodeGenerator::run() {
  if (CodeGenOnly) {
    // Perform only parallel codegen and return.
    ThreadPool Pool;
    assert(ProducedBinaries.empty() && "The generator should not be reused");
    ProducedBinaries.resize(Modules.size());
    int count = 0;
    for (auto &ModuleBuffer : Modules) {
      Pool.async([&](int count) {
        LLVMContext Context;
        Context.setDiscardValueNames(LTODiscardValueNames);

        // Parse module now
        auto TheModule = loadModuleFromBuffer(ModuleBuffer, Context, false);

        // CodeGen
        ProducedBinaries[count] = codegen(*TheModule);
      }, count++);
    }

    return;
  }

  // Sequential linking phase
  auto Index = linkCombinedIndex();

  // Save temps: index.
  if (!SaveTempsDir.empty()) {
    auto SaveTempPath = SaveTempsDir + "index.bc";
    std::error_code EC;
    raw_fd_ostream OS(SaveTempPath, EC, sys::fs::F_None);
    if (EC)
      report_fatal_error(Twine("Failed to open ") + SaveTempPath +
                         " to save optimized bitcode\n");
    WriteIndexToFile(*Index, OS);
  }

  // Prepare the resulting object vector
  assert(ProducedBinaries.empty() && "The generator should not be reused");
  ProducedBinaries.resize(Modules.size());

  // Prepare the module map.
  auto ModuleMap = generateModuleMap(Modules);
  auto ModuleCount = Modules.size();

  // Collect, for each module, the list of functions it defines (GUID -> Summary).
  StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries(ModuleCount);
  Index->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);

  // Collect the import/export lists for all modules from the call-graph in the
  // combined index.
  StringMap<FunctionImporter::ImportMapTy> ImportLists(ModuleCount);
  StringMap<FunctionImporter::ExportSetTy> ExportLists(ModuleCount);
  ComputeCrossModuleImport(*Index, ModuleToDefinedGVSummaries, ImportLists,
                           ExportLists);

  // Convert the preserved-symbols set from strings to GUIDs; this is needed
  // for computing the caching hash and for internalization.
  auto GUIDPreservedSymbols =
      computeGUIDPreservedSymbols(PreservedSymbols, TMBuilder.TheTriple);

  // We use a std::map here to be able to have a defined ordering when
  // producing a hash for the cache entry.
  // FIXME: we should be able to compute the caching hash for the entry based
  // on the index, and nuke this map.
  StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;

  // Resolve LinkOnce/Weak symbols; this has to be computed early because it
  // impacts the caching.
  resolveWeakForLinkerInIndex(*Index, ResolvedODR);

  auto isExported = [&](StringRef ModuleIdentifier, GlobalValue::GUID GUID) {
    const auto &ExportList = ExportLists.find(ModuleIdentifier);
    return (ExportList != ExportLists.end() &&
            ExportList->second.count(GUID)) ||
           GUIDPreservedSymbols.count(GUID);
  };

  // Use global summary-based analysis to identify symbols that can be
  // internalized (because they aren't exported or preserved as per callback).
  // Changes are made in the index, consumed in the ThinLTO backends.
  thinLTOInternalizeAndPromoteInIndex(*Index, isExported);

  // Make sure that every module has an entry in the ExportLists and
  // ResolvedODR maps to enable threaded access to these maps below.
  for (auto &DefinedGVSummaries : ModuleToDefinedGVSummaries) {
    ExportLists[DefinedGVSummaries.first()];
    ResolvedODR[DefinedGVSummaries.first()];
  }

  // Compute the order in which we will process the inputs: the rough
  // heuristic here is to sort them by size so that the largest modules get
  // scheduled as soon as possible. This is purely a compile-time optimization.
  std::vector<int> ModulesOrdering;
  ModulesOrdering.resize(Modules.size());
  std::iota(ModulesOrdering.begin(), ModulesOrdering.end(), 0);
  std::sort(ModulesOrdering.begin(), ModulesOrdering.end(),
            [&](int LeftIndex, int RightIndex) {
              auto LSize = Modules[LeftIndex].getBufferSize();
              auto RSize = Modules[RightIndex].getBufferSize();
              return LSize > RSize;
            });

  // Parallel optimizer + codegen
  {
    ThreadPool Pool(ThreadCount);
    for (auto IndexCount : ModulesOrdering) {
      auto &ModuleBuffer = Modules[IndexCount];
      Pool.async([&](int count) {
        auto ModuleIdentifier = ModuleBuffer.getBufferIdentifier();
        auto &ExportList = ExportLists[ModuleIdentifier];

        auto &DefinedFunctions = ModuleToDefinedGVSummaries[ModuleIdentifier];

        // The module may be cached; this helps handle it.
        ModuleCacheEntry CacheEntry(CacheOptions.Path, *Index, ModuleIdentifier,
                                    ImportLists[ModuleIdentifier], ExportList,
                                    ResolvedODR[ModuleIdentifier],
                                    DefinedFunctions, GUIDPreservedSymbols);

        {
          auto ErrOrBuffer = CacheEntry.tryLoadingBuffer();
          DEBUG(dbgs() << "Cache " << (ErrOrBuffer ? "hit" : "miss") << " '"
                       << CacheEntry.getEntryPath() << "' for buffer " << count
                       << " " << ModuleIdentifier << "\n");

          if (ErrOrBuffer) {
            // Cache Hit!
            ProducedBinaries[count] = std::move(ErrOrBuffer.get());
            return;
          }
        }

        LLVMContext Context;
        Context.setDiscardValueNames(LTODiscardValueNames);
        Context.enableDebugTypeODRUniquing();
        auto DiagFileOrErr = setupOptimizationRemarks(Context, count);
        if (!DiagFileOrErr) {
          errs() << "Error: " << toString(DiagFileOrErr.takeError()) << "\n";
          report_fatal_error("ThinLTO: Can't get an output file for the "
                             "remarks");
        }

        // Parse module now
        auto TheModule = loadModuleFromBuffer(ModuleBuffer, Context, false);

        // Save temps: original file.
        saveTempBitcode(*TheModule, SaveTempsDir, count, ".0.original.bc");

        auto &ImportList = ImportLists[ModuleIdentifier];
        // Run the main process now, and generate a binary.
        auto OutputBuffer = ProcessThinLTOModule(
            *TheModule, *Index, ModuleMap, *TMBuilder.create(), ImportList,
            ExportList, GUIDPreservedSymbols,
            ModuleToDefinedGVSummaries[ModuleIdentifier], CacheOptions,
            DisableCodeGen, SaveTempsDir, count);

        OutputBuffer = CacheEntry.write(std::move(OutputBuffer));
        ProducedBinaries[count] = std::move(OutputBuffer);
      }, IndexCount);
    }
  }

  CachePruning(CacheOptions.Path)
      .setPruningInterval(std::chrono::seconds(CacheOptions.PruningInterval))
      .setEntryExpiration(std::chrono::seconds(CacheOptions.Expiration))
      .setMaxSize(CacheOptions.MaxPercentageOfAvailableSpace)
      .prune();

  // If statistics were requested, print them out now.
  if (llvm::AreStatisticsEnabled())
    llvm::PrintStatistics();
}
// Main entry point for ThinLTO processing.
void ThinLTOCodeGenerator::run() {
  if (CodeGenOnly) {
    // Perform only parallel codegen and return.
    ThreadPool Pool;
    assert(ProducedBinaries.empty() && "The generator should not be reused");
    ProducedBinaries.resize(Modules.size());
    int count = 0;
    for (auto &ModuleBuffer : Modules) {
      Pool.async([&](int count) {
        LLVMContext Context;
        Context.setDiscardValueNames(LTODiscardValueNames);

        // Parse module now
        auto TheModule = loadModuleFromBuffer(ModuleBuffer, Context, false);

        // CodeGen
        ProducedBinaries[count] = codegen(*TheModule);
      }, count++);
    }

    return;
  }

  // Sequential linking phase
  auto Index = linkCombinedIndex();

  // Save temps: index.
  if (!SaveTempsDir.empty()) {
    auto SaveTempPath = SaveTempsDir + "index.bc";
    std::error_code EC;
    raw_fd_ostream OS(SaveTempPath, EC, sys::fs::F_None);
    if (EC)
      report_fatal_error(Twine("Failed to open ") + SaveTempPath +
                         " to save optimized bitcode\n");
    WriteIndexToFile(*Index, OS);
  }

  // Prepare the resulting object vector
  assert(ProducedBinaries.empty() && "The generator should not be reused");
  ProducedBinaries.resize(Modules.size());

  // Prepare the module map.
  auto ModuleMap = generateModuleMap(Modules);
  auto ModuleCount = Modules.size();

  // Collect the import/export lists for all modules from the call-graph in the
  // combined index.
  StringMap<FunctionImporter::ImportMapTy> ImportLists(ModuleCount);
  StringMap<FunctionImporter::ExportSetTy> ExportLists(ModuleCount);
  ComputeCrossModuleImport(*Index, ImportLists, ExportLists);

  // Parallel optimizer + codegen
  {
    ThreadPool Pool(ThreadCount);
    int count = 0;
    for (auto &ModuleBuffer : Modules) {
      Pool.async([&](int count) {
        LLVMContext Context;
        Context.setDiscardValueNames(LTODiscardValueNames);

        // Parse module now
        auto TheModule = loadModuleFromBuffer(ModuleBuffer, Context, false);

        // Save temps: original file.
        if (!SaveTempsDir.empty()) {
          saveTempBitcode(*TheModule, SaveTempsDir, count, ".0.original.bc");
        }

        auto &ImportList = ImportLists[TheModule->getModuleIdentifier()];
        ProducedBinaries[count] = ProcessThinLTOModule(
            *TheModule, *Index, ModuleMap, *TMBuilder.create(), ImportList,
            CacheOptions, DisableCodeGen, SaveTempsDir, count);
      }, count);
      count++;
    }
  }

  // If statistics were requested, print them out now.
  if (llvm::AreStatisticsEnabled())
    llvm::PrintStatistics();
}
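In both versions, each task receives its index through an explicit argument (count++ or IndexCount) rather than capturing the shared counter by reference, so the index a task sees is fixed at enqueue time. A standalone sketch of that pattern (the function and names below are illustrative, not ThinLTO code):

#include "llvm/Support/ThreadPool.h"
#include <vector>

// Illustrative only: the loop counter keeps changing on the enqueuing thread,
// so each task takes its slot index by value and writes only to that slot.
void squareInParallel(std::vector<int> &Results) {
  llvm::ThreadPool Pool;
  for (int Idx = 0, E = static_cast<int>(Results.size()); Idx != E; ++Idx)
    Pool.async([&Results](int I) { Results[I] = I * I; }, Idx);
  Pool.wait(); // The destructor would also wait, as in the examples above.
}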