Code Example #1
// Attempt to mmap a file for read-only access.
void MmapFileMap(MemoryMappedFile* self, const char *fn)
{
  TimingScope timing_scope(&g_Stats.m_MmapCalls, &g_Stats.m_MmapTimeCycles);

  MmapFileUnmap(self);

  int fd = open(fn, O_RDONLY);

  if (-1 == fd)
    goto error;

  struct stat stbuf;
  if (0 != fstat(fd, &stbuf))
    goto error;

  self->m_Address    = mmap(NULL, stbuf.st_size, PROT_READ, MAP_FILE|MAP_PRIVATE, fd, 0);
  self->m_Size       = stbuf.st_size;
  self->m_SysData[0] = fd;

  // mmap() reports failure with MAP_FAILED rather than a null pointer.
  if (MAP_FAILED != self->m_Address)
    return;

error:
  if (-1 != fd)
    close(fd);

  Clear(self);
}
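Taken together with MmapFileUnmap (code example #3), a caller might use the helper roughly as sketched below. This is illustrative only: it assumes Clear() zeroes the struct so a failed mapping can be detected through a null m_Address, and the file name is made up.

MemoryMappedFile file;
Clear(&file);                          // assumed to zero the struct, including m_Address

MmapFileMap(&file, "state.dat");       // hypothetical file name

if (file.m_Address)
{
  // Mapped read-only: file.m_Size bytes are visible at file.m_Address.
}
else
{
  // open(), fstat() or mmap() failed and the struct was cleared again.
}

MmapFileUnmap(&file);                  // safe to call even if the mapping failed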
Code Example #2
bool DigestCacheSave(DigestCache* self, MemAllocHeap* serialization_heap, const char* tmp_filename)
{
  TimingScope timing_scope(nullptr, &g_Stats.m_DigestCacheSaveTimeCycles);

  BinaryWriter writer;
  BinaryWriterInit(&writer, serialization_heap);

  BinarySegment *main_seg   = BinaryWriterAddSegment(&writer);
  BinarySegment *array_seg  = BinaryWriterAddSegment(&writer);
  BinarySegment *string_seg = BinaryWriterAddSegment(&writer);
  BinaryLocator  array_ptr  = BinarySegmentPosition(array_seg);

  auto save_record = [=](size_t index, const HashRecord* hr)
  {
    const DigestCacheRecord* r = (const DigestCacheRecord*) hr;

    BinarySegmentWriteUint64(array_seg, r->m_Timestamp);
    BinarySegmentWriteUint64(array_seg, r->m_AccessTime);
    BinarySegmentWriteUint32(array_seg, r->m_Hash);
    BinarySegmentWrite(array_seg, &r->m_ContentDigest, sizeof(r->m_ContentDigest));
    BinarySegmentWritePointer(array_seg, BinarySegmentPosition(string_seg));
    BinarySegmentWriteStringData(string_seg, r->m_String);
    BinarySegmentWriteUint32(array_seg, 0); // m_Padding
#if ENABLED(USE_FAST_HASH)
    BinarySegmentWriteUint32(array_seg, 0); // m_Padding
#endif
  };

  HashTableWalk(&self->m_Table, save_record);

  BinarySegmentWriteUint32(main_seg, DigestCacheState::MagicNumber);
  BinarySegmentWriteInt32(main_seg, (int) self->m_Table.m_RecordCount);
  BinarySegmentWritePointer(main_seg, array_ptr);

  // Unmap old state to avoid sharing conflicts on Windows.
  MmapFileUnmap(&self->m_StateFile);
  self->m_State = nullptr;

  bool success = BinaryWriterFlush(&writer, tmp_filename);

  if (success)
  {
    success = RenameFile(tmp_filename, self->m_StateFilename);
  }
  else
  {
    remove(tmp_filename);
  }

  BinaryWriterDestroy(&writer);

  return success;
}
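DigestCacheSave serializes everything into tmp_filename and only renames it over the live state file once the flush succeeds, deleting the temporary file otherwise, so a crash mid-write never corrupts the existing cache. A rough standalone sketch of that write-then-rename pattern with plain C stdio follows; SaveStateAtomically and its parameters are illustrative, not tundra APIs (the real code goes through BinaryWriterFlush and a RenameFile helper).

#include <cstdio>

// Illustrative only: write new state to a temporary file and promote it with
// a rename so the real state file is never left half-written.
static bool SaveStateAtomically(const char* tmp_filename, const char* state_filename,
                                const void* data, size_t size)
{
  FILE* f = fopen(tmp_filename, "wb");
  if (!f)
    return false;

  bool success = fwrite(data, 1, size, f) == size;
  success = (0 == fclose(f)) && success;

  if (success)
  {
    // Plain rename() may refuse to replace an existing file on Windows, which
    // is one reason the project wraps this step in its own RenameFile helper.
    success = (0 == rename(tmp_filename, state_filename));
  }

  if (!success)
    remove(tmp_filename);

  return success;
}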
Code Example #3
// Unmap an mmaped file from RAM.
void MmapFileUnmap(MemoryMappedFile* self)
{
  if (self->m_Address)
  {
    TimingScope timing_scope(&g_Stats.m_MunmapCalls, &g_Stats.m_MunmapTimeCycles);

    if (0 != munmap(self->m_Address, self->m_Size))
      Croak("munmap(%p, %d) failed: %d", self->m_Address, (int) self->m_Size, errno);

    close((int) self->m_SysData[0]);
  }

  Clear(self);
}
Code Example #4
// Unmap an mmaped file from RAM.
void MmapFileUnmap(MemoryMappedFile* self)
{
  TimingScope timing_scope(&g_Stats.m_MmapCalls, &g_Stats.m_MmapTimeCycles);

  if (self->m_Address)
  {
    if (!UnmapViewOfFile(self->m_Address))
    {
      CroakErrno("UnMapViewOfFile() failed");
    }

    HANDLE file    = (HANDLE) self->m_SysData[0];
    HANDLE mapping = (HANDLE) self->m_SysData[1];

    CloseHandle(mapping);
    CloseHandle(file);
  }

  Clear(self);
}
Code Example #5
// Attempt to mmap a file for read-only access.
void MmapFileMap(MemoryMappedFile* self, const char *fn)
{
  TimingScope timing_scope(&g_Stats.m_MmapCalls, &g_Stats.m_MmapTimeCycles);

  const DWORD desired_access       = GENERIC_READ;
  const DWORD share_mode           = FILE_SHARE_READ;
  const DWORD creation_disposition = OPEN_EXISTING;
  const DWORD flags                = FILE_ATTRIBUTE_NORMAL;

  HANDLE file = CreateFileA(fn, desired_access, share_mode, NULL, creation_disposition, flags, NULL);

  if (INVALID_HANDLE_VALUE == file)
  {
    return;
  }

  const uint64_t file_size = GetFileSize64(file);

  HANDLE mapping = CreateFileMapping(file, NULL, PAGE_READONLY, DWORD(file_size >> 32), DWORD(file_size), NULL);
  if (nullptr == mapping)
  {
    Log(kError, "CreateFileMapping() failed: %u", GetLastError());
    CloseHandle(file);
    return;
  }

  void* address = MapViewOfFile(mapping, FILE_MAP_READ, 0, 0, DWORD(file_size));

  if (nullptr == address)
  {
    Log(kError, "MapViewOfFile() failed: %u", GetLastError());
    CloseHandle(mapping);
    CloseHandle(file);
    return;
  }

  self->m_Address    = address;
  self->m_Size       = (size_t) file_size;
  self->m_SysData[0] = (uintptr_t) file;
  self->m_SysData[1] = (uintptr_t) mapping;
}
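Code examples #4 and #5 together show why both the file handle and the mapping handle are stashed in m_SysData: the unmap path needs them again to tear everything down. For reference, here is a minimal self-contained sketch of the same CreateFileA / CreateFileMapping / MapViewOfFile lifetime using only raw Win32 calls; GetFileSizeEx stands in for the project's GetFileSize64 helper and the file name is made up.

#include <windows.h>

// Minimal sketch (not tundra code): map a file read-only, use it, tear it down.
int main()
{
  HANDLE file = CreateFileA("example.bin", GENERIC_READ, FILE_SHARE_READ, NULL,
                            OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  if (INVALID_HANDLE_VALUE == file)
    return 1;

  LARGE_INTEGER size;
  if (!GetFileSizeEx(file, &size))
  {
    CloseHandle(file);
    return 1;
  }

  HANDLE mapping = CreateFileMappingA(file, NULL, PAGE_READONLY, 0, 0, NULL);
  if (NULL == mapping)
  {
    CloseHandle(file);
    return 1;
  }

  const void* address = MapViewOfFile(mapping, FILE_MAP_READ, 0, 0, 0);
  if (NULL == address)
  {
    CloseHandle(mapping);
    CloseHandle(file);
    return 1;
  }

  // ... read size.QuadPart bytes through address ...

  UnmapViewOfFile(address);
  CloseHandle(mapping);
  CloseHandle(file);
  return 0;
}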
Code Example #6
File: BuildQueue.cpp  Project: MadFishTheOne/tundra
  static BuildProgress::Enum RunAction(BuildQueue* queue, ThreadState* thread_state, NodeState* node, Mutex* queue_lock)
  {
    const NodeData    *node_data    = node->m_MmapData;
    const char        *cmd_line     = node_data->m_Action;
    const char        *pre_cmd_line = node_data->m_PreAction;

    if (!cmd_line || cmd_line[0] == '\0')
      return BuildProgress::kSucceeded;

    if (node->m_MmapData->m_Flags & NodeData::kFlagExpensive)
    {
      if (queue->m_ExpensiveRunning == queue->m_Config.m_MaxExpensiveCount)
      {
        ParkExpensiveNode(queue, node);
        return BuildProgress::kRunAction;
      }
      else
      {
        ++queue->m_ExpensiveRunning;
      }
    }

    MutexUnlock(queue_lock);

    StatCache         *stat_cache   = queue->m_Config.m_StatCache;
    const char        *annotation   = node_data->m_Annotation;
    int                job_id       = thread_state->m_ThreadIndex;
    int                echo_cmdline = 0 != (queue->m_Config.m_Flags & BuildQueueConfig::kFlagEchoCommandLines);

    // Repack frozen env to pointers on the stack.
    int                env_count    = node_data->m_EnvVars.GetCount();
    EnvVariable*       env_vars     = (EnvVariable*) alloca(env_count * sizeof(EnvVariable));
    for (int i = 0; i < env_count; ++i)
    {
      env_vars[i].m_Name  = node_data->m_EnvVars[i].m_Name;
      env_vars[i].m_Value = node_data->m_EnvVars[i].m_Value;
    }

    for (const FrozenFileAndHash& output_file : node_data->m_OutputFiles)
    {
      PathBuffer output;
      PathInit(&output, output_file.m_Filename);

      if (!MakeDirectoriesForFile(stat_cache, output))
      {
        Log(kError, "failed to create output directories for %s", output_file.m_Filename.Get());
        MutexLock(queue_lock);
        return BuildProgress::kFailed;
      }
    }

    ExecResult result = { 0, false };

    // See if we need to remove the output files before running anything.
    if (0 == (node_data->m_Flags & NodeData::kFlagOverwriteOutputs))
    {
      for (const FrozenFileAndHash& output : node_data->m_OutputFiles)
      {
        Log(kDebug, "Removing output file %s before running action", output.m_Filename.Get());
        remove(output.m_Filename);
        StatCacheMarkDirty(stat_cache, output.m_Filename, output.m_Hash);
      }
    }

    if (pre_cmd_line)
    {
      Log(kSpam, "Launching pre-action process");
      TimingScope timing_scope(&g_Stats.m_ExecCount, &g_Stats.m_ExecTimeCycles);
      result = ExecuteProcess(pre_cmd_line, env_count, env_vars, job_id, echo_cmdline, "(pre-build command)");
      Log(kSpam, "Process return code %d", result.m_ReturnCode);
    }

    if (0 == result.m_ReturnCode)
    {
      Log(kSpam, "Launching process");
      TimingScope timing_scope(&g_Stats.m_ExecCount, &g_Stats.m_ExecTimeCycles);
      result = ExecuteProcess(cmd_line, env_count, env_vars, job_id, echo_cmdline, annotation);
      Log(kSpam, "Process return code %d", result.m_ReturnCode);
    }

    for (const FrozenFileAndHash& output : node_data->m_OutputFiles)
    {
      StatCacheMarkDirty(stat_cache, output.m_Filename, output.m_Hash);
    }

    MutexLock(queue_lock);

    if (result.m_WasSignalled)
    {
      SignalSet("child processes signalled");
    }

    if (0 == result.m_ReturnCode)
    {
      return BuildProgress::kSucceeded;
    }
    else
    {
      // Clean up output files after a failed build unless they are precious.
      if (0 == (NodeData::kFlagPreciousOutputs & node_data->m_Flags))
      {
        for (const FrozenFileAndHash& output : node_data->m_OutputFiles)
        {
          Log(kDebug, "Removing output file %s from failed build", output.m_Filename.Get());
          remove(output.m_Filename);
          StatCacheMarkDirty(stat_cache, output.m_Filename, output.m_Hash);
        }
      }

      return BuildProgress::kFailed;
    }
  }
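The expensive-node check near the top of RunAction works because m_ExpensiveRunning is read and incremented while queue_lock is still held; the lock is only released after the slot has been claimed (or the node parked for a later retry). A minimal sketch of that counter-under-a-lock throttle, using standard C++ primitives instead of tundra's Mutex and queue types (all names here are illustrative):

#include <mutex>

struct ExpensiveThrottle
{
  std::mutex m_Lock;
  int        m_Running = 0;
  int        m_Max     = 2;   // analogous to m_Config.m_MaxExpensiveCount
};

// Returns true if the job may start now; false means "park it and retry later".
static bool TryAcquireExpensiveSlot(ExpensiveThrottle* t)
{
  std::lock_guard<std::mutex> guard(t->m_Lock);
  if (t->m_Running == t->m_Max)
    return false;
  ++t->m_Running;
  return true;
}

static void ReleaseExpensiveSlot(ExpensiveThrottle* t)
{
  std::lock_guard<std::mutex> guard(t->m_Lock);
  --t->m_Running;
}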
Code Example #7
File: ScanCache.cpp  Project: NocturnDragon/tundra
bool ScanCacheSave(ScanCache* self, const char* fn, MemoryMappedFile* prev_mapping, MemAllocHeap* heap)
{
  TimingScope timing_scope(nullptr, &g_Stats.m_ScanCacheSaveTime);

  MemAllocLinear* scratch = self->m_Allocator;

  MemAllocLinearScope scratch_scope(scratch);

  ScanCacheWriter writer;
  ScanCacheWriterInit(&writer, heap);

  // Save new view of the scan cache
  //
  // Algorithm:
  // 
  // - Get all records from the dynamic table (stuff we put in this session)
  const uint32_t      record_count = self->m_RecordCount;
  ScanCache::Record **dyn_records  = LinearAllocateArray<ScanCache::Record*>(scratch, record_count);

  {
    uint32_t records_out = 0;
    for (uint32_t ti = 0, tsize = self->m_TableSize; ti < tsize; ++ti)
    {
      ScanCache::Record* chain = self->m_Table[ti];
      while (chain)
      {
        dyn_records[records_out++] = chain;
        chain                      = chain->m_Next;
      }
    }

    CHECK(records_out == record_count);
  }

  // - Sort these records in key order (by SHA-1 hash)
  std::sort(dyn_records, dyn_records + record_count, SortRecordsByHash);

  const ScanData       *scan_data      = self->m_FrozenData;
  uint32_t              frozen_count   = scan_data ? scan_data->m_EntryCount : 0;
  const HashDigest     *frozen_digests = scan_data ? scan_data->m_Keys.Get() : nullptr;
  const ScanCacheEntry *frozen_entries = scan_data ? scan_data->m_Data.Get() : nullptr;
  const uint64_t       *frozen_times   = scan_data ? scan_data->m_AccessTimes.Get() : nullptr;
  const uint8_t        *frozen_access  = self->m_FrozenAccess;

  const uint64_t now = time(nullptr);

  // Keep old entries for a week.
  const uint64_t timestamp_cutoff = now - 60 * 60 * 24 * 7;

  auto key_dynamic = [=](size_t index) -> const HashDigest* { return &dyn_records[index]->m_Key; };
  auto key_frozen = [=](size_t index) { return frozen_digests + index; };

  auto save_dynamic = [&writer, dyn_records, now](size_t index)
  {
    SaveRecord(
        &writer,
        &dyn_records[index]->m_Key,
        dyn_records[index]->m_Includes,
        dyn_records[index]->m_IncludeCount,
        dyn_records[index]->m_FileTimestamp,
        now);
  };

  auto save_frozen = [&](size_t index)
  {
    uint64_t timestamp = frozen_times[index];
    if (frozen_access[index])
      timestamp = now;

    if (timestamp > timestamp_cutoff)
    {
      SaveRecord(
          &writer,
          frozen_digests + index, 
          frozen_entries[index].m_IncludedFiles.GetArray(),
          frozen_entries[index].m_IncludedFiles.GetCount(),
          frozen_entries[index].m_FileTimestamp,
          timestamp);
    }
  };

  TraverseSortedArrays(record_count, save_dynamic, key_dynamic, frozen_count, save_frozen, key_frozen);

  // Unmap the previous file from RAM so we can overwrite it on Windows.
  MmapFileUnmap(prev_mapping);

  self->m_FrozenData = nullptr;

  bool result = ScanCacheWriterFlush(&writer, fn);

  ScanCacheWriterDestroy(&writer);

  return result;
}
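TraverseSortedArrays itself is not shown above, but the surrounding code points at a classic merge of two key-sorted sequences: the freshly sorted dynamic records and the frozen records are walked in digest order, each key is saved once, and a dynamic record shadows a frozen one carrying the same digest. Below is a rough sketch of a merge with that shape; the semantics and the operator< on the key type are assumptions, not the actual tundra implementation.

#include <cstddef>

// Illustrative merge over two key-sorted sequences. When the same key appears
// on both sides, only the dynamic (fresh) entry is emitted.
template <typename SaveDyn, typename KeyDyn, typename SaveFrozen, typename KeyFrozen>
void MergeSortedByKey(size_t dyn_count, SaveDyn save_dyn, KeyDyn dyn_key,
                      size_t frozen_count, SaveFrozen save_frozen, KeyFrozen frozen_key)
{
  size_t di = 0, fi = 0;

  while (di < dyn_count && fi < frozen_count)
  {
    const auto* dk = dyn_key(di);
    const auto* fk = frozen_key(fi);

    if (*dk < *fk)
      save_dyn(di++);
    else if (*fk < *dk)
      save_frozen(fi++);
    else
    {
      save_dyn(di++);   // same key on both sides: the fresh record wins
      ++fi;             // the stale frozen record is skipped
    }
  }

  while (di < dyn_count)
    save_dyn(di++);
  while (fi < frozen_count)
    save_frozen(fi++);
}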