Example #1
bool HexDoc::SaveRange(LPCSTR filename, uint64 begin, uint64 length)
{
    // Open (or create) the target file for exclusive writing. Note that
    // OPEN_ALWAYS does not truncate an existing file, so stale bytes past
    // the written range may remain.
    HANDLE hFile = CreateFile(filename, GENERIC_WRITE, 0, NULL, OPEN_ALWAYS, 0, NULL);
    if (hFile == INVALID_HANDLE_VALUE)
        return false;

    // Delegate the copy to the HANDLE-based overload, then close the
    // handle regardless of the outcome.
    bool rc = WriteRange(hFile, begin, length);
    CloseHandle(hFile);
    return rc;
}
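
For context, a minimal call site might look like the following; the document accessor, path, offset, and length are hypothetical:

// Hypothetical usage: persist 256 bytes starting at offset 0x1000.
HexDoc *doc = GetActiveDocument();   // assumed accessor, not part of the example
if (!doc->SaveRange("C:\\temp\\dump.bin", 0x1000, 256))
    printf("SaveRange failed, GetLastError() = %lu\n", GetLastError());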
Example #2
static HRESULT WriteRange(IInStream *inStream,
    ISequentialOutStream *outStream,
    UInt64 position,
    UInt64 size,
    IProgress *progress,
    UInt64 &currentComplexity)
{
  // Position the input stream, then delegate to the overload that copies
  // `size` bytes from the current position. The original ignored Seek's
  // return value; propagating it avoids copying from the wrong offset.
  RINOK(inStream->Seek(position, STREAM_SEEK_SET, 0));
  return WriteRange(inStream, outStream,
    size, progress, currentComplexity);
}
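
A typical call copies a slice of one stream into another while accumulating a byte count for progress reporting; the stream and progress variables below are hypothetical:

// Hypothetical: copy 4 KiB starting at offset 0 and track the bytes copied.
UInt64 complexity = 0;
RINOK(WriteRange(archiveStream, outputStream, /*position=*/0, /*size=*/4096,
    progress, complexity));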
Example #3
static void
rangeaccess(void *addr, uintptr size, uintptr step, uintptr callpc, uintptr pc, bool write)
{
	uintptr racectx;

	// Stack memory is handled separately; only report heap/global ranges.
	if(!onstack((uintptr)addr)) {
		m->racecall = true;	// mark this M as being inside the race runtime
		racectx = g->racectx;
		if(callpc) {
			// lessstack is the stack-unwinding trampoline; recover the
			// real caller PC instead of reporting the synthetic frame.
			if(callpc == (uintptr)runtime·lessstack)
				runtime·callers(3, &callpc, 1);
			runtime∕race·FuncEnter(racectx, (void*)callpc);
		}
		// Report the access over the whole [addr, addr+size) range;
		// `step` is the size of one element in the range.
		if(write)
			runtime∕race·WriteRange(racectx, addr, size, step, (void*)pc);
		else
			runtime∕race·ReadRange(racectx, addr, size, step, (void*)pc);
		if(callpc)
			runtime∕race·FuncExit(racectx);
		m->racecall = false;
	}
}
Example #4
HRESULT UpdateVolume(
    IInStream *inStream,
    const CArchiveDatabaseEx *database,
    CObjectVector<CUpdateItem> &updateItems,
    ISequentialOutStream *seqOutStream,
    IArchiveUpdateCallback *updateCallback,
    const CUpdateOptions &options)
{
  if (updateItems.Size() != 1)
    return E_NOTIMPL;

  CMyComPtr<IArchiveUpdateCallback2> volumeCallback;
  RINOK(updateCallback->QueryInterface(IID_IArchiveUpdateCallback2, (void **)&volumeCallback));
  if (!volumeCallback)
    return E_NOTIMPL;

  CMyComPtr<ISequentialInStream> fileStream;
  HRESULT result = updateCallback->GetStream(0, &fileStream);
  if (result != S_OK && result != S_FALSE)
    return result;
  // S_FALSE from GetStream means the callback has no data stream for the item.
  if (result == S_FALSE)
    return E_FAIL;
  
  CFileItem file;
  
  const CUpdateItem &updateItem = updateItems[0];
  if (updateItem.NewProperties)
    FromUpdateItemToFileItem(updateItem, file);
  else
    file = database->Files[updateItem.IndexInArchive];
  if (file.IsAnti || file.IsDirectory)
    return E_FAIL;

  UInt64 complexity = 0;
  file.IsStartPosDefined = true;
  file.StartPos = 0;
  for (UInt64 volumeIndex = 0; true; volumeIndex++)
  {
    // Ask the callback how large this volume may be, then compute how many
    // payload bytes fit once the per-volume header is accounted for.
    UInt64 volSize;
    RINOK(volumeCallback->GetVolumeSize(volumeIndex, &volSize));
    UInt64 pureSize = COutArchive::GetVolPureSize(volSize, file.Name.Length(), true);
    CMyComPtr<ISequentialOutStream> volumeStream;
    RINOK(volumeCallback->GetVolumeStream(volumeIndex, &volumeStream));

    COutArchive archive;
    RINOK(archive.Create(volumeStream, true));
    RINOK(archive.SkeepPrefixArchiveHeader());
        
    CSequentialInStreamWithCRC *inCrcStreamSpec = new CSequentialInStreamWithCRC;
    CMyComPtr<ISequentialInStream> inCrcStream = inCrcStreamSpec;
    inCrcStreamSpec->Init(fileStream);

    // Copy up to pureSize bytes into this volume; the CRC wrapper tracks
    // how many bytes were actually read from the source file.
    RINOK(WriteRange(inCrcStream, volumeStream, pureSize, updateCallback, complexity));
    file.UnPackSize = inCrcStreamSpec->GetSize();
    if (file.UnPackSize == 0)
      break;            // source exhausted before this volume: nothing to write
    file.FileCRC = inCrcStreamSpec->GetCRC();
    RINOK(WriteVolumeHeader(archive, file, options));
    file.StartPos += file.UnPackSize;
    if (file.UnPackSize < pureSize)
      break;            // short copy: this was the last volume
  }
  return S_OK;
}
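
Stripped of the 7-Zip plumbing, the loop above is a standard split-to-volumes pattern: fill each part up to its payload capacity and stop on a short copy. A minimal sketch, assuming simple ReadSource/WriteVolume helpers and a fixed compile-time constant kVolSize (all hypothetical):

for (UInt64 volumeIndex = 0;; volumeIndex++)
{
  Byte buf[kVolSize];
  size_t got = ReadSource(buf, kVolSize);   // plays the role of inCrcStream
  if (got == 0)
    break;                                  // source already exhausted
  WriteVolume(volumeIndex, buf, got);       // one volume file per iteration
  if (got < kVolSize)
    break;                                  // short read: this was the last part
}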
static HRESULT Update2(
    DECL_EXTERNAL_CODECS_LOC_VARS
    IInStream *inStream,
    const CArchiveDatabaseEx *database,
    const CObjectVector<CUpdateItem> &updateItems,
    ISequentialOutStream *seqOutStream,
    IArchiveUpdateCallback *updateCallback,
    const CUpdateOptions &options)
{
  UInt64 numSolidFiles = options.NumSolidFiles;
  if (numSolidFiles == 0)
    numSolidFiles = 1;
  /*
  CMyComPtr<IOutStream> outStream;
  RINOK(seqOutStream->QueryInterface(IID_IOutStream, (void **)&outStream));
  if (!outStream)
    return E_NOTIMPL;
  */

  UInt64 startBlockSize = database != 0 ? database->ArchiveInfo.StartPosition : 0;
  if (startBlockSize > 0 && !options.RemoveSfxBlock)
  {
    // Preserve any self-extractor stub that precedes the archive proper.
    RINOK(WriteRange(inStream, seqOutStream, 0, startBlockSize, NULL));
  }

  CRecordVector<int> fileIndexToUpdateIndexMap;
  if (database != 0)
  {
    fileIndexToUpdateIndexMap.Reserve(database->Files.Size());
    for (int i = 0; i < database->Files.Size(); i++)
      fileIndexToUpdateIndexMap.Add(-1);
  }
  int i;
  for(i = 0; i < updateItems.Size(); i++)
  {
    int index = updateItems[i].IndexInArchive;
    if (index != -1)
      fileIndexToUpdateIndexMap[index] = i;
  }

  CRecordVector<int> folderRefs;
  if (database != 0)
  {
    for(i = 0; i < database->Folders.Size(); i++)
    {
      CNum indexInFolder = 0;
      CNum numCopyItems = 0;
      CNum numUnPackStreams = database->NumUnPackStreamsVector[i];
      for (CNum fileIndex = database->FolderStartFileIndex[i];
      indexInFolder < numUnPackStreams; fileIndex++)
      {
        if (database->Files[fileIndex].HasStream)
        {
          indexInFolder++;
          int updateIndex = fileIndexToUpdateIndexMap[fileIndex];
          if (updateIndex >= 0)
            if (!updateItems[updateIndex].NewData)
              numCopyItems++;
        }
      }
      // A folder that mixes kept and replaced streams would have to be
      // repacked, which this updater does not implement.
      if (numCopyItems != numUnPackStreams && numCopyItems != 0)
        return E_NOTIMPL;
      if (numCopyItems > 0)
        folderRefs.Add(i);
    }
    folderRefs.Sort(CompareFolderRefs, (void *)database);
  }

  CArchiveDatabase newDatabase;

  ////////////////////////////

  COutArchive archive;
  RINOK(archive.Create(seqOutStream, false));
  RINOK(archive.SkeepPrefixArchiveHeader());
  UInt64 complexity = 0;
  for(i = 0; i < folderRefs.Size(); i++)
    complexity += database->GetFolderFullPackSize(folderRefs[i]);
  UInt64 inSizeForReduce = 0;
  for(i = 0; i < updateItems.Size(); i++)
  {
    const CUpdateItem &updateItem = updateItems[i];
    if (updateItem.NewData)
    {
      complexity += updateItem.Size;
      if (numSolidFiles == 1)
      {
        if (updateItem.Size > inSizeForReduce)
          inSizeForReduce = updateItem.Size;
      }
      else
        inSizeForReduce += updateItem.Size;
    }
  }
  RINOK(updateCallback->SetTotal(complexity));
  complexity = 0;
  RINOK(updateCallback->SetCompleted(&complexity));


  CLocalProgress *lps = new CLocalProgress;
  CMyComPtr<ICompressProgressInfo> progress = lps;
  lps->Init(updateCallback, true);

  /////////////////////////////////////////
  // Write Copy Items

  for(i = 0; i < folderRefs.Size(); i++)
  {
    int folderIndex = folderRefs[i];
    
    lps->ProgressOffset = complexity;
    UInt64 packSize = database->GetFolderFullPackSize(folderIndex);
    RINOK(WriteRange(inStream, archive.SeqStream,
        database->GetFolderStreamPos(folderIndex, 0), packSize, progress));
    complexity += packSize;
    
    const CFolder &folder = database->Folders[folderIndex];
    CNum startIndex = database->FolderStartPackStreamIndex[folderIndex];
    for (int j = 0; j < folder.PackStreams.Size(); j++)
    {
      newDatabase.PackSizes.Add(database->PackSizes[startIndex + j]);
      // newDatabase.PackCRCsDefined.Add(database.PackCRCsDefined[startIndex + j]);
      // newDatabase.PackCRCs.Add(database.PackCRCs[startIndex + j]);
    }
    newDatabase.Folders.Add(folder);

    CNum numUnPackStreams = database->NumUnPackStreamsVector[folderIndex];
    newDatabase.NumUnPackStreamsVector.Add(numUnPackStreams);

    CNum indexInFolder = 0;
    for (CNum fi = database->FolderStartFileIndex[folderIndex];
        indexInFolder < numUnPackStreams; fi++)
    {
      CFileItem file = database->Files[fi];
      if (file.HasStream)
      {
        indexInFolder++;
        int updateIndex = fileIndexToUpdateIndexMap[fi];
        if (updateIndex >= 0)
        {
          const CUpdateItem &updateItem = updateItems[updateIndex];
          if (updateItem.NewProperties)
          {
            CFileItem file2;
            FromUpdateItemToFileItem(updateItem, file2);
            file2.UnPackSize = file.UnPackSize;
            file2.FileCRC = file.FileCRC;
            file2.IsFileCRCDefined = file.IsFileCRCDefined;
            file2.HasStream = file.HasStream;
            file = file2;
          }
        }
        newDatabase.Files.Add(file);
      }
    }
  }

  /////////////////////////////////////////
  // Compress New Files

  CObjectVector<CSolidGroup> groups;
  SplitFilesToGroups(*options.Method, options.UseFilters, options.MaxFilter, 
      updateItems, groups);

  const UInt32 kMinReduceSize = (1 << 16);
  if (inSizeForReduce < kMinReduceSize)
    inSizeForReduce = kMinReduceSize;

  for (int groupIndex = 0; groupIndex < groups.Size(); groupIndex++)
  {
    const CSolidGroup &group = groups[groupIndex];
    int numFiles = group.Indices.Size();
    if (numFiles == 0)
      continue;
    CRecordVector<CRefItem> refItems;
    refItems.Reserve(numFiles);
    bool sortByType = (numSolidFiles > 1);
    for (i = 0; i < numFiles; i++)
      refItems.Add(CRefItem(group.Indices[i], updateItems[group.Indices[i]], sortByType));
    refItems.Sort(CompareUpdateItems, (void *)&sortByType);
    
    CRecordVector<UInt32> indices;
    indices.Reserve(numFiles);

    for (i = 0; i < numFiles; i++)
    {
      UInt32 index = refItems[i].Index;
      indices.Add(index);
      /*
      const CUpdateItem &updateItem = updateItems[index];
      CFileItem file;
      if (updateItem.NewProperties)
        FromUpdateItemToFileItem(updateItem, file);
      else
        file = database.Files[updateItem.IndexInArchive];
      if (file.IsAnti || file.IsDirectory)
        return E_FAIL;
      newDatabase.Files.Add(file);
      */
    }
    
    CEncoder encoder(group.Method);

    for (i = 0; i < numFiles;)
    {
      // Grow the next solid block until the file-count limit, the byte
      // budget, or (with SolidExtension) a change of extension stops it;
      // see the isolated sketch after this function.
      UInt64 totalSize = 0;
      int numSubFiles;
      UString prevExtension;
      for (numSubFiles = 0; i + numSubFiles < numFiles &&
          numSubFiles < numSolidFiles; numSubFiles++)
      {
        const CUpdateItem &updateItem = updateItems[indices[i + numSubFiles]];
        totalSize += updateItem.Size;
        if (totalSize > options.NumSolidBytes)
          break;
        if (options.SolidExtension)
        {
          UString ext = updateItem.GetExtension();
          if (numSubFiles == 0)
            prevExtension = ext;
          else
            if (ext.CompareNoCase(prevExtension) != 0)
              break;
        }
      }
      if (numSubFiles < 1)
        numSubFiles = 1;

      CFolderInStream *inStreamSpec = new CFolderInStream;
      CMyComPtr<ISequentialInStream> solidInStream(inStreamSpec);
      inStreamSpec->Init(updateCallback, &indices[i], numSubFiles);
      
      CFolder folderItem;

      int startPackIndex = newDatabase.PackSizes.Size();
      RINOK(encoder.Encode(
          EXTERNAL_CODECS_LOC_VARS
          solidInStream, NULL, &inSizeForReduce, folderItem, 
          archive.SeqStream, newDatabase.PackSizes, progress));

      for (; startPackIndex < newDatabase.PackSizes.Size(); startPackIndex++)
        lps->OutSize += newDatabase.PackSizes[startPackIndex];

      lps->InSize += folderItem.GetUnPackSize();
      // for()
      // newDatabase.PackCRCsDefined.Add(false);
      // newDatabase.PackCRCs.Add(0);
      
      newDatabase.Folders.Add(folderItem);
      
      CNum numUnPackStreams = 0;
      for (int subIndex = 0; subIndex < numSubFiles; subIndex++)
      {
        const CUpdateItem &updateItem = updateItems[indices[i + subIndex]];
        CFileItem file;
        if (updateItem.NewProperties)
          FromUpdateItemToFileItem(updateItem, file);
        else
          file = database->Files[updateItem.IndexInArchive];
        if (file.IsAnti || file.IsDirectory)
          return E_FAIL;
        
        /*
        CFileItem &file = newDatabase.Files[
              startFileIndexInDatabase + i + subIndex];
        */
        if (!inStreamSpec->Processed[subIndex])
        {
          // The callback could not supply this file (e.g. it was locked);
          // drop it from the new database.
          continue;
          // file.Name += L".locked";
        }

        file.FileCRC = inStreamSpec->CRCs[subIndex];
        file.UnPackSize = inStreamSpec->Sizes[subIndex];
        if (file.UnPackSize != 0)
        {
          file.IsFileCRCDefined = true;
          file.HasStream = true;
          numUnPackStreams++;
        }
        else
        {
          file.IsFileCRCDefined = false;
          file.HasStream = false;
        }
        newDatabase.Files.Add(file);
      }
      // numUnPackStreams = 0 is very bad case for locked files
      // v3.13 doesn't understand it.
      newDatabase.NumUnPackStreamsVector.Add(numUnPackStreams);
      i += numSubFiles;
    }
  }

  {
    /////////////////////////////////////////
    // Write Empty Files & Folders
    
    CRecordVector<int> emptyRefs;
    for(i = 0; i < updateItems.Size(); i++)
    {
      const CUpdateItem &updateItem = updateItems[i];
      if (updateItem.NewData)
      {
        if (updateItem.HasStream())
          continue;
      }
      else
        if (updateItem.IndexInArchive != -1)
          if (database->Files[updateItem.IndexInArchive].HasStream)
            continue;
      emptyRefs.Add(i);
    }
    emptyRefs.Sort(CompareEmptyItems, (void *)&updateItems);
    for(i = 0; i < emptyRefs.Size(); i++)
    {
      const CUpdateItem &updateItem = updateItems[emptyRefs[i]];
      CFileItem file;
      if (updateItem.NewProperties)
        FromUpdateItemToFileItem(updateItem, file);
      else
        file = database->Files[updateItem.IndexInArchive];
      newDatabase.Files.Add(file);
    }
  }
    
  /*
  if (newDatabase.Files.Size() != updateItems.Size())
    return E_FAIL;
  */

  return archive.WriteDatabase(EXTERNAL_CODECS_LOC_VARS
      newDatabase, options.HeaderMethod, options.HeaderOptions);
}
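
The grouping rule in the numSubFiles loop above is worth isolating: consecutive files join one solid block until the file-count limit, the byte budget, or an extension change is hit, and at least one file is always consumed. A self-contained sketch of that rule; FileInfo and the limit parameters are hypothetical stand-ins, and the real code compares extensions case-insensitively:

#include <string>
#include <vector>

struct FileInfo { unsigned long long size; std::wstring ext; };

// Returns how many files, starting at `first`, go into the next solid block.
// Mirrors the numSubFiles loop above.
static size_t SolidGroupLen(const std::vector<FileInfo> &files, size_t first,
    size_t maxFiles, unsigned long long maxBytes, bool groupByExt)
{
  unsigned long long total = 0;
  size_t n = 0;
  for (; first + n < files.size() && n < maxFiles; n++)
  {
    const FileInfo &f = files[first + n];
    total += f.size;
    if (total > maxBytes)
      break;                       // byte budget exceeded: exclude this file
    if (groupByExt && n != 0 && f.ext != files[first].ext)
      break;                       // extension changed: start a new block
  }
  return n == 0 ? 1 : n;           // always consume at least one file
}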