// Writes a minimal ("naked") MPQ header to the archive stream at ha->MpqPos.
// All fields except ID, header size, archive size, format version and sector
// size are zeroed. Returns ERROR_SUCCESS or the error from the write.
static int WriteNakedMPQHeader(TMPQArchive * ha)
{
    TMPQHeader * pSrcHeader = ha->pHeader;
    TMPQHeader NakedHeader;
    DWORD cbToWrite = pSrcHeader->dwHeaderSize;
    int nResult = ERROR_SUCCESS;

    // Build the naked header: zero everything, then copy the identifying fields.
    // Note that the archive size is deliberately set to the header size only.
    memset(&NakedHeader, 0, sizeof(TMPQHeader));
    NakedHeader.dwID            = pSrcHeader->dwID;
    NakedHeader.dwHeaderSize    = pSrcHeader->dwHeaderSize;
    NakedHeader.dwArchiveSize   = pSrcHeader->dwHeaderSize;
    NakedHeader.wFormatVersion  = pSrcHeader->wFormatVersion;
    NakedHeader.wSectorSize     = pSrcHeader->wSectorSize;

    // Byte-swap each format-version portion of the header before writing
    // (no-ops on little-endian builds)
    BSWAP_TMPQHEADER(&NakedHeader, MPQ_FORMAT_VERSION_1);
    BSWAP_TMPQHEADER(&NakedHeader, MPQ_FORMAT_VERSION_2);
    BSWAP_TMPQHEADER(&NakedHeader, MPQ_FORMAT_VERSION_3);
    BSWAP_TMPQHEADER(&NakedHeader, MPQ_FORMAT_VERSION_4);

    // Write the header at the archive start position
    if(!FileStream_Write(ha->pStream, &ha->MpqPos, &NakedHeader, cbToWrite))
        nResult = GetLastError();

    return nResult;
}
// Extracts the file szToExtract from the MPQ archive hMpq into the local file
// szExtracted. dwSearchScope is passed through to SFileOpenFileEx.
// Returns true on success; on failure, returns false with the error code
// stored via SetLastError.
bool WINAPI SFileExtractFile(HANDLE hMpq, const char * szToExtract, const char * szExtracted, DWORD dwSearchScope)
{
    TFileStream * pLocalFile = NULL;
    HANDLE hMpqFile = NULL;
    int nError = ERROR_SUCCESS;

    // Open the MPQ file
    if (nError == ERROR_SUCCESS)
    {
        if(!SFileOpenFileEx(hMpq, szToExtract, dwSearchScope, &hMpqFile))
            nError = GetLastError();
    }

    // Create the local file
    if (nError == ERROR_SUCCESS)
    {
        pLocalFile = FileStream_CreateFile(szExtracted);
        if(pLocalFile == NULL)
            nError = GetLastError();
    }

    // Copy the file's content in 4 KB chunks
    if (nError == ERROR_SUCCESS)
    {
        char szBuffer[0x1000];
        DWORD dwTransferred;

        for (;;)
        {
            // dwTransferred is only set to nonzero if something has been read.
            // nError can be ERROR_SUCCESS or ERROR_HANDLE_EOF.
            // ERROR_HANDLE_EOF is not an error here: it just means the last
            // (possibly partial) chunk has been read, so it is cleared.
            // NOTE(review): the loop only terminates on dwTransferred == 0;
            // this assumes SFileReadFile zeroes dwTransferred on failure —
            // verify against the SFileReadFile implementation.
            if (!SFileReadFile(hMpqFile, szBuffer, sizeof(szBuffer), &dwTransferred, NULL))
                nError = GetLastError();
            if (nError == ERROR_HANDLE_EOF)
                nError = ERROR_SUCCESS;
            if (dwTransferred == 0)
                break;

            // If something has been actually read, write it
            if (!FileStream_Write(pLocalFile, NULL, szBuffer, dwTransferred))
                nError = GetLastError();
        }
    }

    // Close the files
    if (hMpqFile != NULL)
        SFileCloseFile(hMpqFile);
    if (pLocalFile != NULL)
        FileStream_Close(pLocalFile);
    // Propagate the failure code to the caller via last-error
    if (nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
/*
 * Copies ByteCount bytes of non-MPQ data (data outside the archive proper)
 * from pSrcStream at *ByteOffset to the current position of pTrgStream.
 * On return, *ByteOffset is advanced by the number of bytes copied.
 * Reports progress through ha->pfnCompactCB, if set.
 * Returns ERROR_SUCCESS or the error code of the failed stream operation.
 */
static int CopyNonMpqData(
    TMPQArchive * ha,
    TFileStream * pSrcStream,
    TFileStream * pTrgStream,
    uint64_t * ByteOffset,
    uint64_t ByteCount)
{
    char CopyBuffer[0x1000];
    uint64_t BytesRemaining = ByteCount;
    uint32_t dwChunkSize;
    int nResult = ERROR_SUCCESS;

    /* Move the data in chunks of at most sizeof(CopyBuffer) bytes */
    while(BytesRemaining > 0)
    {
        /* Clamp the chunk to the number of bytes still to copy */
        dwChunkSize = sizeof(CopyBuffer);
        if(BytesRemaining < dwChunkSize)
            dwChunkSize = (uint32_t)BytesRemaining;

        /* Pull the chunk from the source stream */
        if(!FileStream_Read(pSrcStream, ByteOffset, CopyBuffer, dwChunkSize))
        {
            nResult = GetLastError();
            break;
        }

        /* Push the chunk to the target stream at its current position */
        if(!FileStream_Write(pTrgStream, NULL, CopyBuffer, dwChunkSize))
        {
            nResult = GetLastError();
            break;
        }

        /* Report progress to the compact callback, if registered */
        if(ha->pfnCompactCB != NULL)
        {
            ha->CompactBytesProcessed += dwChunkSize;
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COPYING_NON_MPQ_DATA, ha->CompactBytesProcessed, ha->CompactTotalBytes);
        }

        /* Advance the source offset and shrink the remaining count */
        *ByteOffset += dwChunkSize;
        BytesRemaining -= dwChunkSize;
    }

    return nResult;
}
static int CopyNonMpqData( TMPQArchive * ha, TFileStream * pSrcStream, TFileStream * pTrgStream, ULONGLONG & ByteOffset, ULONGLONG & ByteCount) { ULONGLONG DataSize = ByteCount; DWORD dwToRead; char DataBuffer[0x1000]; int nError = ERROR_SUCCESS; // Copy the data while(DataSize > 0) { // Get the proper size of data dwToRead = sizeof(DataBuffer); if(DataSize < dwToRead) dwToRead = (DWORD)DataSize; // Read from the source stream if(!FileStream_Read(pSrcStream, &ByteOffset, DataBuffer, dwToRead)) { nError = GetLastError(); break; } // Write to the target stream if(!FileStream_Write(pTrgStream, NULL, DataBuffer, dwToRead)) { nError = GetLastError(); break; } // Update the progress if(ha->pfnCompactCB != NULL) { ha->CompactBytesProcessed += dwToRead; ha->pfnCompactCB(ha->pvCompactUserData, CCB_COPYING_NON_MPQ_DATA, ha->CompactBytesProcessed, ha->CompactTotalBytes); } // Decrement the number of data to be copied ByteOffset += dwToRead; DataSize -= dwToRead; } return nError; }
// Extracts the (fully patched) archived file szArchivedFile from the open MPQ
// MPQ_handle into the disk file szFileName.
// Returns ERROR_SUCCESS or an error code from the failing operation.
int ExtractFileToHardDrive(HANDLE &MPQ_handle, const char * szArchivedFile, const char * szFileName)
{
    HANDLE hFile = NULL;            // Archived file handle
    TFileStream * handle = NULL;    // Disk file handle
    int nError = ERROR_SUCCESS;     // Result value

    // Open the archived file, with all patches applied
    if (nError == ERROR_SUCCESS)
    {
        if (!SFileOpenFileEx(MPQ_handle, szArchivedFile, SFILE_OPEN_PATCHED_FILE, &hFile))
            nError = GetLastError();
    }

    // Create the target file
    if (nError == ERROR_SUCCESS)
    {
        handle = FileStream_CreateFile(szFileName);
        if (handle == NULL)
            nError = GetLastError();
    }

    // Read the file from the archive
    if (nError == ERROR_SUCCESS)
    {
        // Get the size of the full patched file.
        // BUGFIX: SFileGetFileSize returns SFILE_INVALID_SIZE (0xFFFFFFFF) on
        // failure; the previous code only excluded 0 and would have tried to
        // allocate a ~4 GB buffer in that case.
        DWORD dwFileSize = SFileGetFileSize(hFile, NULL);

        if (dwFileSize != 0 && dwFileSize != SFILE_INVALID_SIZE)
        {
            // Allocate space for the full file
            BYTE * pbFullFile = new BYTE[dwFileSize];

            if (!SFileReadFile(hFile, pbFullFile, dwFileSize))
            {
                nError = GetLastError();
                printf("Failed to read full patched file data \"%s\"\n", szFileName);
                assert(false);
            }
            // BUGFIX: only write the buffer if the read actually succeeded
            // (the old code wrote uninitialized data on read failure), and
            // check the result of the write as well.
            else if (!FileStream_Write(handle, NULL, pbFullFile, dwFileSize))
            {
                nError = GetLastError();
            }

            delete [] pbFullFile;
        }
    }

    // Cleanup and exit
    if (handle != NULL)
        FileStream_Close(handle);
    if (hFile != NULL)
        SFileCloseFile(hFile);
    return nError;
}
static int CopyNonMpqData( TFileStream * pSrcStream, TFileStream * pTrgStream, LARGE_INTEGER & ByteOffset, LARGE_INTEGER & ByteCount) { LARGE_INTEGER DataSize = ByteCount; DWORD dwToRead; char DataBuffer[0x1000]; int nError = ERROR_SUCCESS; // Copy the data while(DataSize.QuadPart > 0) { // Get the proper size of data dwToRead = sizeof(DataBuffer); if(DataSize.QuadPart < dwToRead) dwToRead = DataSize.LowPart; // Read from the source stream if(!FileStream_Read(pSrcStream, &ByteOffset, DataBuffer, dwToRead)) { nError = GetLastError(); break; } // Write to the target stream if(!FileStream_Write(pTrgStream, NULL, DataBuffer, dwToRead)) { nError = GetLastError(); break; } // Update the progress if(CompactCB != NULL) { CompactBytesProcessed.QuadPart += dwToRead; CompactCB(pvUserData, CCB_COPYING_NON_MPQ_DATA, &CompactBytesProcessed, &CompactTotalBytes); } // Decrement the number of data to be copied ByteOffset.QuadPart += dwToRead; DataSize.QuadPart -= dwToRead; } return ERROR_SUCCESS; }
// Finalizes weak-signing of the archive: computes the MD5 of the archive
// content, signs it with the Blizzard weak RSA private key (PKCS#1 v1.5),
// and writes the signature block directly into the archive stream.
// Returns ERROR_SUCCESS, ERROR_FILE_CORRUPT, ERROR_VERIFY_FAILED, or the
// error from the final write.
int SSignFileFinish(TMPQArchive * ha)
{
    MPQ_SIGNATURE_INFO si;
    unsigned long signature_len = MPQ_WEAK_SIGNATURE_SIZE;
    BYTE WeakSignature[MPQ_SIGNATURE_FILE_SIZE];
    BYTE Md5Digest[MD5_DIGEST_SIZE];
    rsa_key key;
    int hash_idx = find_hash("md5");

    // Sanity checks: the archive must be flushed and the signature file present
    assert((ha->dwFlags & MPQ_FLAG_CHANGED) == 0);
    assert(ha->dwFileFlags3 == MPQ_FILE_EXISTS);

    // Query the weak signature info
    memset(&si, 0, sizeof(MPQ_SIGNATURE_INFO));
    if(!QueryMpqSignatureInfo(ha, &si))
        return ERROR_FILE_CORRUPT;

    // There must be exactly one signature, and it must be the weak one
    if(si.SignatureTypes != SIGNATURE_TYPE_WEAK)
        return ERROR_FILE_CORRUPT;

    // Calculate MD5 of the entire archive (excluding the signature area)
    if(!CalculateMpqHashMd5(ha, &si, Md5Digest))
        return ERROR_VERIFY_FAILED;

    // Decode the private key from its base64 form
    if(!decode_base64_key(szBlizzardWeakPrivateKey, &key))
        return ERROR_VERIFY_FAILED;

    // Sign the hash. The signature proper starts 8 bytes into the buffer;
    // the leading 8 bytes stay zero.
    // NOTE(review): the return value of rsa_sign_hash_ex is not checked —
    // a signing failure would silently write a zeroed/partial signature.
    memset(WeakSignature, 0, sizeof(WeakSignature));
    rsa_sign_hash_ex(Md5Digest, sizeof(Md5Digest), WeakSignature + 8, &signature_len, LTC_LTC_PKCS_1_V1_5, 0, 0, hash_idx, 0, &key);
    // MPQ stores the signature byte-reversed relative to LibTomCrypt's output
    memrev(WeakSignature + 8, MPQ_WEAK_SIGNATURE_SIZE);
    rsa_free(&key);

    // Write the signature to the MPQ. Don't use SFile* functions, but write the hash directly
    if(!FileStream_Write(ha->pStream, &si.BeginExclude, WeakSignature, MPQ_SIGNATURE_FILE_SIZE))
        return GetLastError();

    return ERROR_SUCCESS;
}
// Appends dwDataSize bytes from pbFileData to the file being written (hf),
// buffering in the per-file sector buffer and flushing each full sector to
// the archive: sectors are hashed (MD5/CRC32), optionally compressed
// (implode and/or multi-method compress with dwCompression), optionally
// encrypted, and then written at RawFilePos + current compressed size.
// Updates pFileEntry->dwCmpSize and, when present, the sector offset and
// sector checksum tables. Returns ERROR_SUCCESS or an error code.
static int WriteDataToMpqFile(
    TMPQArchive * ha,
    TMPQFile * hf,
    LPBYTE pbFileData,
    DWORD dwDataSize,
    DWORD dwCompression)
{
    TFileEntry * pFileEntry = hf->pFileEntry;
    ULONGLONG ByteOffset;
    LPBYTE pbCompressed = NULL;         // Compressed (target) data
    LPBYTE pbToWrite = NULL;            // Data to write to the file
    int nCompressionLevel = -1;         // ADPCM compression level (only used for wave files)
    int nError = ERROR_SUCCESS;

    // If the caller wants ADPCM compression, we will set wave compression level to 4,
    // which corresponds to medium quality
    if(dwCompression & LOSSY_COMPRESSION_MASK)
        nCompressionLevel = 4;

    // Make sure that the caller won't overrun the previously initiated file size
    assert(hf->dwFilePos + dwDataSize <= pFileEntry->dwFileSize);
    assert(hf->dwSectorCount != 0);
    assert(hf->pbFileSector != NULL);
    if((hf->dwFilePos + dwDataSize) > pFileEntry->dwFileSize)
        return ERROR_DISK_FULL;
    // By default, write straight from the sector buffer; switched to the
    // compressed buffer below when compression is active
    pbToWrite = hf->pbFileSector;

    // Now write all data to the file sector buffer
    if(nError == ERROR_SUCCESS)
    {
        DWORD dwBytesInSector = hf->dwFilePos % hf->dwSectorSize;
        DWORD dwSectorIndex = hf->dwFilePos / hf->dwSectorSize;
        DWORD dwBytesToCopy;

        // Process all data.
        while(dwDataSize != 0)
        {
            dwBytesToCopy = dwDataSize;

            // Check for sector overflow
            if(dwBytesToCopy > (hf->dwSectorSize - dwBytesInSector))
                dwBytesToCopy = (hf->dwSectorSize - dwBytesInSector);

            // Copy the data to the file sector
            memcpy(hf->pbFileSector + dwBytesInSector, pbFileData, dwBytesToCopy);
            dwBytesInSector += dwBytesToCopy;
            pbFileData += dwBytesToCopy;
            dwDataSize -= dwBytesToCopy;

            // Update the file position
            hf->dwFilePos += dwBytesToCopy;

            // If the current sector is full, or if the file is already full,
            // then write the data to the MPQ
            if(dwBytesInSector >= hf->dwSectorSize || hf->dwFilePos >= pFileEntry->dwFileSize)
            {
                // Set the position in the file: sectors are laid out back to
                // back, so the next write goes after the bytes written so far
                ByteOffset = hf->RawFilePos + pFileEntry->dwCmpSize;

                // Update CRC32 and MD5 of the file (over the uncompressed data)
                md5_process((hash_state *)hf->hctx, hf->pbFileSector, dwBytesInSector);
                hf->dwCrc32 = crc32(hf->dwCrc32, hf->pbFileSector, dwBytesInSector);

                // Compress the file sector, if needed
                if(pFileEntry->dwFlags & MPQ_FILE_COMPRESSED)
                {
                    int nOutBuffer = (int)dwBytesInSector;
                    int nInBuffer = (int)dwBytesInSector;

                    // If the file is compressed, allocate buffer for the compressed data.
                    // Note that we allocate buffer that is a bit longer than sector size,
                    // for case if the compression method performs a buffer overrun
                    if(pbCompressed == NULL)
                    {
                        pbToWrite = pbCompressed = STORM_ALLOC(BYTE, hf->dwSectorSize + 0x100);
                        if(pbCompressed == NULL)
                        {
                            nError = ERROR_NOT_ENOUGH_MEMORY;
                            break;
                        }
                    }

                    //
                    // Note that both SCompImplode and SCompCompress give original buffer,
                    // if they are unable to compress the data.
                    //

                    if(pFileEntry->dwFlags & MPQ_FILE_IMPLODE)
                    {
                        SCompImplode((char *)pbCompressed, &nOutBuffer, (char *)hf->pbFileSector, nInBuffer);
                    }

                    if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS)
                    {
                        SCompCompress((char *)pbCompressed, &nOutBuffer, (char *)hf->pbFileSector, nInBuffer, (unsigned)dwCompression, 0, nCompressionLevel);
                    }

                    // Update sector positions: record the end offset of this
                    // sector in the sector offset table, if present
                    dwBytesInSector = nOutBuffer;
                    if(hf->SectorOffsets != NULL)
                        hf->SectorOffsets[dwSectorIndex+1] = hf->SectorOffsets[dwSectorIndex] + dwBytesInSector;

                    // We have to calculate sector CRC, if enabled
                    // (ADLER32 over the compressed sector data)
                    if(hf->SectorChksums != NULL)
                        hf->SectorChksums[dwSectorIndex] = adler32(0, pbCompressed, nOutBuffer);
                }

                // Encrypt the sector, if necessary. The key is the file key
                // plus the zero-based sector index.
                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
                {
                    BSWAP_ARRAY32_UNSIGNED(pbToWrite, dwBytesInSector);
                    EncryptMpqBlock(pbToWrite, dwBytesInSector, hf->dwFileKey + dwSectorIndex);
                    BSWAP_ARRAY32_UNSIGNED(pbToWrite, dwBytesInSector);
                }

                // Write the file sector
                if(!FileStream_Write(ha->pStream, &ByteOffset, pbToWrite, dwBytesInSector))
                {
                    nError = GetLastError();
                    break;
                }

                // Call the compact callback, if any
                if(AddFileCB != NULL)
                    AddFileCB(pvUserData, hf->dwFilePos, hf->dwDataSize, false);

                // Update the compressed file size and start a fresh sector
                pFileEntry->dwCmpSize += dwBytesInSector;
                dwBytesInSector = 0;
                dwSectorIndex++;
            }
        }
    }

    // Cleanup
    if(pbCompressed != NULL)
        STORM_FREE(pbCompressed);
    return nError;
}
// Re-encrypts all raw data of an encrypted file in place after a rename:
// the MPQ file key is derived from the plain file name, so renaming changes
// the key. Derives old/new keys from the two names, then decrypts and
// re-encrypts the sector offset table (if present) and every raw sector,
// writing each back to its original position. No recompression is done.
// Returns ERROR_SUCCESS or an error code.
static int RecryptFileData(
    TMPQArchive * ha,
    TMPQFile * hf,
    const char * szFileName,
    const char * szNewFileName)
{
    ULONGLONG RawFilePos;
    TFileEntry * pFileEntry = hf->pFileEntry;
    DWORD dwBytesToRecrypt = pFileEntry->dwCmpSize;
    DWORD dwOldKey;
    DWORD dwNewKey;
    int nError = ERROR_SUCCESS;

    // The file must be encrypted
    assert(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED);

    // File decryption key is calculated from the plain name
    szNewFileName = GetPlainFileNameA(szNewFileName);
    szFileName = GetPlainFileNameA(szFileName);

    // Calculate both file keys
    dwOldKey = DecryptFileKey(szFileName, pFileEntry->ByteOffset, pFileEntry->dwFileSize, pFileEntry->dwFlags);
    dwNewKey = DecryptFileKey(szNewFileName, pFileEntry->ByteOffset, pFileEntry->dwFileSize, pFileEntry->dwFlags);

    // In case the keys are equal, don't recrypt the file
    if(dwNewKey == dwOldKey)
        return ERROR_SUCCESS;
    hf->dwFileKey = dwOldKey;

    // Calculate the raw position of the file in the archive
    hf->MpqFilePos = pFileEntry->ByteOffset;
    hf->RawFilePos = ha->MpqPos + hf->MpqFilePos;

    // Allocate buffer for file transfer
    nError = AllocateSectorBuffer(hf);
    if(nError != ERROR_SUCCESS)
        return nError;

    // Also allocate buffer for sector offsets
    // Note: Don't load sector checksums, we don't need to recrypt them
    nError = AllocateSectorOffsets(hf, true);
    if(nError != ERROR_SUCCESS)
        return nError;

    // If we have sector offsets, recrypt these as well
    if(hf->SectorOffsets != NULL)
    {
        // Allocate secondary buffer for sectors copy.
        // SectorOffsets[0] is the byte length of the offset table itself.
        DWORD * SectorOffsetsCopy = (DWORD *)STORM_ALLOC(BYTE, hf->SectorOffsets[0]);
        DWORD dwSectorOffsLen = hf->SectorOffsets[0];

        if(SectorOffsetsCopy == NULL)
            return ERROR_NOT_ENOUGH_MEMORY;

        // Recrypt the array of sector offsets.
        // The offset table uses the file key minus one.
        memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorOffsLen);
        EncryptMpqBlock(SectorOffsetsCopy, dwSectorOffsLen, dwNewKey - 1);
        BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorOffsLen);

        // Write the recrypted array back
        if(!FileStream_Write(ha->pStream, &hf->RawFilePos, SectorOffsetsCopy, dwSectorOffsLen))
            nError = GetLastError();
        STORM_FREE(SectorOffsetsCopy);
    }

    // Now we have to recrypt all file sectors. We do it without
    // recompression, because recompression is not necessary in this case
    if(nError == ERROR_SUCCESS)
    {
        for(DWORD dwSector = 0; dwSector < hf->dwSectorCount; dwSector++)
        {
            DWORD dwRawDataInSector = hf->dwSectorSize;
            DWORD dwRawByteOffset = dwSector * hf->dwSectorSize;

            // Last sector: If there is not enough bytes remaining in the file, cut the raw size
            if(dwRawDataInSector > dwBytesToRecrypt)
                dwRawDataInSector = dwBytesToRecrypt;

            // Fix the raw data length if the file is compressed
            // (the sector table gives the exact compressed sector size)
            if(hf->SectorOffsets != NULL)
            {
                dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
                dwRawByteOffset = hf->SectorOffsets[dwSector];
            }

            // Calculate the raw file offset of the file sector
            CalculateRawSectorOffset(RawFilePos, hf, dwRawByteOffset);

            // Read the file sector
            if(!FileStream_Read(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // If necessary, re-encrypt the sector.
            // Note: Recompression is not necessary here. Unlike encryption,
            // the compression does not depend on the position of the file in MPQ.
            // Sector keys are the file key plus the zero-based sector index.
            BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
            DecryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwOldKey + dwSector);
            EncryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwNewKey + dwSector);
            BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);

            // Write the sector back
            if(!FileStream_Write(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // Decrement number of bytes remaining
            // (intentionally by full sector size; the last iteration may
            // underflow but the loop terminates on dwSectorCount)
            dwBytesToRecrypt -= hf->dwSectorSize;
        }
    }
    return nError;
}
// Extracts the CASC file szFileName from hStorage into the directory
// szLocalPath (local file name = szLocalPath + '\\' + szFileName).
// Creates missing directories on demand. Returns ERROR_SUCCESS or an
// error code from the failing operation.
static int ExtractFile(HANDLE hStorage, const char * szFileName, const TCHAR * szLocalPath, DWORD dwLocaleFlags)
{
    TFileStream * pStream = NULL;
    HANDLE hFile = NULL;
    TCHAR szLocalFileName[MAX_PATH];
    TCHAR * szNamePtr = szLocalFileName;
    BYTE Buffer[0x1000];
    DWORD dwBytesRead = 0;          // BUGFIX: initialized (was read uninitialized on a failed first read)
    int nError = ERROR_SUCCESS;

    // Create the file path
    _tcscpy(szNamePtr, szLocalPath);
    szNamePtr += _tcslen(szLocalPath);
    *szNamePtr++ = _T('\\');

    // Copy the plain file name
    CopyString(szNamePtr, szFileName, strlen(szFileName));

    // Open the CASC file
    if(nError == ERROR_SUCCESS)
    {
        // Open a file
        if(!CascOpenFile(hStorage, szFileName, dwLocaleFlags, 0, &hFile))
        {
            assert(GetLastError() != ERROR_SUCCESS);
            nError = GetLastError();
        }
    }

    // Create the local file
    if(nError == ERROR_SUCCESS)
    {
        pStream = FileStream_CreateFile(szLocalFileName, 0);
        if(pStream == NULL)
        {
            // Try to create all directories and retry
            ForceCreatePath(szLocalFileName);
            pStream = FileStream_CreateFile(szLocalFileName, 0);
            if(pStream == NULL)
                nError = GetLastError();
        }
    }

    // Copy the file content to the local file
    if(nError == ERROR_SUCCESS)
    {
        for(;;)
        {
            // Read data from the file.
            // BUGFIX: the return value was ignored; on a read failure
            // dwBytesRead could hold a stale value and the loop would spin
            // or write garbage. Bail out with the error instead.
            if(!CascReadFile(hFile, Buffer, sizeof(Buffer), &dwBytesRead))
            {
                nError = GetLastError();
                break;
            }
            if(dwBytesRead == 0)
                break;

            // Write the local file.
            // BUGFIX: a failed write (e.g. disk full) is now reported
            // instead of being silently dropped.
            if(!FileStream_Write(pStream, NULL, Buffer, dwBytesRead))
            {
                nError = GetLastError();
                break;
            }
        }
    }

    // Close handles
    if(pStream != NULL)
        FileStream_Close(pStream);
    if(hFile != NULL)
        CascCloseFile(hFile);
    return nError;
}
// Compacts (defragments) the MPQ archive by rewriting it into a temporary
// file: copies any pre-archive data and user data, writes the header, copies
// all file sectors (CopyMpqFiles), swaps the temporary stream in place of
// the original, and saves the MPQ tables. szListFile supplies names needed
// to resolve file keys for encrypted files. Progress is reported via the
// global CompactCB callback. Returns true on success; on failure, false with
// the error code set via SetLastError.
bool WINAPI SFileCompactArchive(HANDLE hMpq, const char * szListFile, bool /* bReserved */)
{
    TFileStream * pTempStream = NULL;
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    ULONGLONG ByteOffset;
    ULONGLONG ByteCount;
    LPDWORD pFileKeys = NULL;
    char szTempFile[MAX_PATH] = "";
    char * szTemp = NULL;
    int nError = ERROR_SUCCESS;

    // Test the valid parameters.
    // NOTE(review): if the handle is invalid, nError is set but ha is still
    // dereferenced by the two checks below — verify IsValidMpqHandle
    // guarantees ha is at least dereferenceable here.
    if (!IsValidMpqHandle(ha))
        nError = ERROR_INVALID_HANDLE;
    if (ha->dwFlags & MPQ_FLAG_READ_ONLY)
        nError = ERROR_ACCESS_DENIED;
    // Compacting is not supported for archives with HET/BET tables
    if (ha->pHetTable != NULL || ha->pBetTable != NULL)
        nError = ERROR_ACCESS_DENIED;

    // Create the table with file keys (one key slot per file table entry)
    if (nError == ERROR_SUCCESS)
    {
        if ((pFileKeys = ALLOCMEM(DWORD, ha->dwFileTableSize)) != NULL)
            memset(pFileKeys, 0, sizeof(DWORD) * ha->dwFileTableSize);
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // First of all, we have to check if we are able to decrypt all files.
    // If not, sorry, but the archive cannot be compacted.
    if (nError == ERROR_SUCCESS)
    {
        // Initialize the progress variables for compact callback
        FileStream_GetSize(ha->pStream, CompactTotalBytes);
        CompactBytesProcessed = 0;
        nError = CheckIfAllFilesKnown(ha, szListFile, pFileKeys);
    }

    // Get the temporary file name and create it
    // (replace the extension with ".mp_", or append "_" if there is none)
    if (nError == ERROR_SUCCESS)
    {
        strcpy(szTempFile, ha->pStream->szFileName);
        if ((szTemp = strrchr(szTempFile, '.')) != NULL)
            strcpy(szTemp + 1, "mp_");
        else
            strcat(szTempFile, "_");
        pTempStream = FileStream_CreateFile(szTempFile);
        if (pTempStream == NULL)
            nError = GetLastError();
    }

    // Write the data before MPQ user data (if any)
    if (nError == ERROR_SUCCESS && ha->UserDataPos != 0)
    {
        // Inform the application about the progress
        if (CompactCB != NULL)
            CompactCB(pvUserData, CCB_COPYING_NON_MPQ_DATA, CompactBytesProcessed, CompactTotalBytes);
        ByteOffset = 0;
        ByteCount = ha->UserDataPos;
        nError = CopyNonMpqData(ha->pStream, pTempStream, ByteOffset, ByteCount);
    }

    // Write the MPQ user data (if any)
    if (nError == ERROR_SUCCESS && ha->MpqPos > ha->UserDataPos)
    {
        // At this point, we assume that the user data size is equal
        // to pUserData->dwHeaderOffs.
        // If this assumption doesn't work, then we have an unknown version of MPQ
        ByteOffset = ha->UserDataPos;
        ByteCount = ha->MpqPos - ha->UserDataPos;
        assert(ha->pUserData != NULL);
        assert(ha->pUserData->dwHeaderOffs == ByteCount);
        nError = CopyNonMpqData(ha->pStream, pTempStream, ByteOffset, ByteCount);
    }

    // Write the MPQ header
    if (nError == ERROR_SUCCESS)
    {
        // Remember the header size before swapping
        DWORD dwBytesToWrite = ha->pHeader->dwHeaderSize;

        // Byte-swap in place for the write, then swap back
        BSWAP_TMPQHEADER(ha->pHeader);
        if (!FileStream_Write(pTempStream, NULL, ha->pHeader, dwBytesToWrite))
            nError = GetLastError();
        BSWAP_TMPQHEADER(ha->pHeader);

        // Update the progress
        CompactBytesProcessed += ha->pHeader->dwHeaderSize;
        ha->dwFlags &= ~MPQ_FLAG_NO_HEADER;
    }

    // Now copy all files
    if (nError == ERROR_SUCCESS)
    {
        nError = CopyMpqFiles(ha, pFileKeys, pTempStream);
    }

    // If succeeded, switch the streams
    if (nError == ERROR_SUCCESS)
    {
        // On success, the temp stream becomes the archive stream and must
        // not be closed below
        if (FileStream_MoveFile(ha->pStream, pTempStream))
            pTempStream = NULL;
        else
            nError = ERROR_CAN_NOT_COMPLETE;
    }

    // If all succeeded, save the MPQ tables
    if (nError == ERROR_SUCCESS)
    {
        //
        // Note: We don't recalculate position of the MPQ tables at this point.
        // SaveMPQTables does it automatically.
        //

        nError = SaveMPQTables(ha);
        if (nError == ERROR_SUCCESS && CompactCB != NULL)
        {
            CompactBytesProcessed += (ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
            CompactBytesProcessed += (ha->pHeader->dwBlockTableSize * sizeof(TMPQBlock));
            CompactCB(pvUserData, CCB_CLOSING_ARCHIVE, CompactBytesProcessed, CompactTotalBytes);
        }
    }

    // Invalidate the compact callback
    pvUserData = NULL;
    CompactCB = NULL;

    // Cleanup and return
    if (pTempStream != NULL)
        FileStream_Close(pTempStream);
    if (pFileKeys != NULL)
        FREEMEM(pFileKeys);
    if (nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
// Copies all file sectors of one file into another archive stream, without
// recompression: patch header, sector offset table, raw sectors (re-encrypted
// when the positional FIX_KEY key changes), and the trailing sector CRC block.
// On success, updates pFileEntry->ByteOffset to the file's position in the
// new archive. Returns ERROR_SUCCESS, ERROR_FILE_CORRUPT if the number of
// bytes written disagrees with the recorded compressed size, or an I/O error.
static int CopyMpqFileSectors(
    TMPQArchive * ha,
    TMPQFile * hf,
    TFileStream * pNewStream)
{
    TFileEntry * pFileEntry = hf->pFileEntry;
    ULONGLONG RawFilePos;               // Used for calculating sector offset in the old MPQ archive
    ULONGLONG MpqFilePos;               // MPQ file position in the new archive
    DWORD dwBytesToCopy = pFileEntry->dwCmpSize;
    DWORD dwFileKey1 = 0;               // File key used for decryption
    DWORD dwFileKey2 = 0;               // File key used for encryption
    DWORD dwCmpSize = 0;                // Compressed file size
    int nError = ERROR_SUCCESS;

    // Remember the position in the destination file (MPQ-relative)
    FileStream_GetPos(pNewStream, MpqFilePos);
    MpqFilePos -= ha->MpqPos;

    // Resolve decryption keys. Note that the file key given
    // in the TMPQFile structure also includes the key adjustment.
    // With MPQ_FILE_FIX_KEY, the key depends on the file position, so the
    // new position requires a different encryption key (dwFileKey2).
    if (nError == ERROR_SUCCESS && (pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED))
    {
        dwFileKey2 = dwFileKey1 = hf->dwFileKey;
        if (pFileEntry->dwFlags & MPQ_FILE_FIX_KEY)
        {
            dwFileKey2 = (dwFileKey1 ^ pFileEntry->dwFileSize) - (DWORD)pFileEntry->ByteOffset;
            dwFileKey2 = (dwFileKey2 + (DWORD)MpqFilePos) ^ pFileEntry->dwFileSize;
        }
    }

    // If we have to save patch header, do it
    if (nError == ERROR_SUCCESS && hf->PatchInfo != NULL)
    {
        BSWAP_ARRAY32_UNSIGNED(hf->PatchInfo, sizeof(DWORD) * 3);
        if (!FileStream_Write(pNewStream, NULL, hf->PatchInfo, hf->PatchInfo->dwLength))
            nError = GetLastError();

        // Note: In wow-update-12694.MPQ, the dwCmpSize doesn't
        // include the patch header on some files.
        dwCmpSize += hf->PatchInfo->dwLength;
    }

    // If we have to save sector offset table, do it.
    if (nError == ERROR_SUCCESS && hf->SectorOffsets != NULL)
    {
        LPDWORD SectorOffsetsCopy = ALLOCMEM(DWORD, hf->dwSectorCount);
        DWORD dwSectorPosLen = hf->dwSectorCount * sizeof(DWORD);

        // Sector offset tables only exist for multi-sector compressed files
        assert((pFileEntry->dwFlags & MPQ_FILE_SINGLE_UNIT) == 0);
        assert(pFileEntry->dwFlags & MPQ_FILE_COMPRESSED);

        if (SectorOffsetsCopy == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;

        // Encrypt the secondary sector offset table and write it to the target file
        // (the offset table uses the file key minus one)
        if (nError == ERROR_SUCCESS)
        {
            memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorPosLen);
            if (pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
                EncryptMpqBlock(SectorOffsetsCopy, dwSectorPosLen, dwFileKey2 - 1);
            BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorPosLen);
            if (!FileStream_Write(pNewStream, NULL, SectorOffsetsCopy, dwSectorPosLen))
                nError = GetLastError();
            dwCmpSize += dwSectorPosLen;
        }

        // Update compact progress
        if (CompactCB != NULL)
        {
            CompactBytesProcessed += dwSectorPosLen;
            CompactCB(pvUserData, CCB_COMPACTING_FILES, CompactBytesProcessed, CompactTotalBytes);
        }
        FREEMEM(SectorOffsetsCopy);
    }

    // Now we have to copy all file sectors. We do it without
    // recompression, because recompression is not necessary in this case
    if (nError == ERROR_SUCCESS)
    {
        for(DWORD dwSector = 0; dwSector < hf->dwDataSectors; dwSector++)
        {
            DWORD dwRawDataInSector = hf->dwSectorSize;
            DWORD dwRawByteOffset = dwSector * hf->dwSectorSize;

            // Last sector: If there is not enough bytes remaining in the file, cut the raw size
            if (dwRawDataInSector > dwBytesToCopy)
                dwRawDataInSector = dwBytesToCopy;

            // Fix the raw data length if the file is compressed
            // (the sector table gives the exact compressed sector size)
            if (hf->SectorOffsets != NULL)
            {
                dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
                dwRawByteOffset = hf->SectorOffsets[dwSector];
            }

            // Calculate the raw file offset of the file sector
            CalculateRawSectorOffset(RawFilePos, hf, dwRawByteOffset);

            // Read the file sector
            if (!FileStream_Read(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // If necessary, re-encrypt the sector.
            // Note: Recompression is not necessary here. Unlike encryption,
            // the compression does not depend on the position of the file in MPQ.
            if ((pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED) && dwFileKey1 != dwFileKey2)
            {
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
                DecryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey1 + dwSector);
                EncryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey2 + dwSector);
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
            }

            // Now write the sector back to the file
            if (!FileStream_Write(pNewStream, NULL, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // Update compact progress
            if (CompactCB != NULL)
            {
                CompactBytesProcessed += dwRawDataInSector;
                CompactCB(pvUserData, CCB_COMPACTING_FILES, CompactBytesProcessed, CompactTotalBytes);
            }

            // Adjust byte counts
            dwBytesToCopy -= hf->dwSectorSize;
            dwCmpSize += dwRawDataInSector;
        }
    }

    // Copy the sector CRCs, if any.
    // Sector CRCs are always compressed (not imploded) and unencrypted.
    // Their compressed size is the gap between the last two sector offsets.
    if (nError == ERROR_SUCCESS && hf->SectorOffsets != NULL && hf->SectorChksums != NULL)
    {
        DWORD dwCrcLength;

        dwCrcLength = hf->SectorOffsets[hf->dwSectorCount - 1] - hf->SectorOffsets[hf->dwSectorCount - 2];
        if (dwCrcLength != 0)
        {
            // NOTE(review): this reads from the source stream's current
            // position (NULL offset) — relies on the preceding sector reads
            // leaving the stream positioned at the CRC block; confirm.
            if (!FileStream_Read(ha->pStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();
            if (!FileStream_Write(pNewStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            // Update compact progress
            if (CompactCB != NULL)
            {
                CompactBytesProcessed += dwCrcLength;
                CompactCB(pvUserData, CCB_COMPACTING_FILES, CompactBytesProcessed, CompactTotalBytes);
            }

            // Size of the CRC block is also included in the compressed file size
            dwCmpSize += dwCrcLength;
        }
    }

    // Update file position in the block table
    if (nError == ERROR_SUCCESS)
    {
        // At this point, number of bytes written should be exactly
        // the same like the compressed file size. If it isn't,
        // there's something wrong (an unknown archive version, MPQ protection, ...)
        //
        // Note: Diablo savegames have very weird layout, and the file "hero"
        // seems to have improper compressed size. Instead of real compressed size,
        // the "dwCmpSize" member of the block table entry contains
        // uncompressed size of file data + size of the sector table.
        // If we compact the archive, Diablo will refuse to load the game
        // Seems like some sort of protection to me.
        if (dwCmpSize == pFileEntry->dwCmpSize)
        {
            // Update file pos in the block table
            pFileEntry->ByteOffset = MpqFilePos;
        }
        else
        {
            nError = ERROR_FILE_CORRUPT;
            assert(false);
        }
    }

    return nError;
}
// Compacts (defragments) the MPQ archive by rewriting it into a temporary
// stream: flushes pending changes, copies pre-archive and user data, writes
// the header, copies all file sectors, defragments the file table, rebuilds
// the HET table if present, replaces the original stream, and saves the MPQ
// tables. szListFile supplies names needed to resolve keys of encrypted
// files. Progress goes through ha->pfnCompactCB. Returns true on success;
// false with the error code set via SetLastError otherwise.
bool WINAPI SFileCompactArchive(HANDLE hMpq, const char * szListFile, bool /* bReserved */)
{
    TFileStream * pTempStream = NULL;
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    ULONGLONG ByteOffset;
    ULONGLONG ByteCount;
    LPDWORD pFileKeys = NULL;
    TCHAR szTempFile[MAX_PATH] = _T("");
    TCHAR * szTemp = NULL;
    int nError = ERROR_SUCCESS;

    // Test the valid parameters.
    // NOTE(review): if the handle is invalid, nError is set but ha is still
    // dereferenced by the read-only check — verify IsValidMpqHandle
    // guarantees ha is at least dereferenceable here.
    if(!IsValidMpqHandle(hMpq))
        nError = ERROR_INVALID_HANDLE;
    if(ha->dwFlags & MPQ_FLAG_READ_ONLY)
        nError = ERROR_ACCESS_DENIED;

    // If the MPQ is changed at this moment, we have to flush the archive
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_CHANGED))
    {
        SFileFlushArchive(hMpq);
    }

    // Create the table with file keys (one key slot per file table entry)
    if(nError == ERROR_SUCCESS)
    {
        if((pFileKeys = STORM_ALLOC(DWORD, ha->dwFileTableSize)) != NULL)
            memset(pFileKeys, 0, sizeof(DWORD) * ha->dwFileTableSize);
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // First of all, we have to check if we are able to decrypt all files.
    // If not, sorry, but the archive cannot be compacted.
    if(nError == ERROR_SUCCESS)
    {
        // Initialize the progress variables for compact callback
        FileStream_GetSize(ha->pStream, &(ha->CompactTotalBytes));
        ha->CompactBytesProcessed = 0;
        nError = CheckIfAllFilesKnown(ha, szListFile, pFileKeys);
    }

    // Get the temporary file name and create it
    // (replace the extension with ".mp_", or append "_" if there is none)
    if(nError == ERROR_SUCCESS)
    {
        _tcscpy(szTempFile, FileStream_GetFileName(ha->pStream));
        if((szTemp = _tcsrchr(szTempFile, '.')) != NULL)
            _tcscpy(szTemp + 1, _T("mp_"));
        else
            _tcscat(szTempFile, _T("_"));
        pTempStream = FileStream_CreateFile(szTempFile, STREAM_PROVIDER_FLAT | BASE_PROVIDER_FILE);
        if(pTempStream == NULL)
            nError = GetLastError();
    }

    // Write the data before MPQ user data (if any)
    if(nError == ERROR_SUCCESS && ha->UserDataPos != 0)
    {
        // Inform the application about the progress
        if(ha->pfnCompactCB != NULL)
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COPYING_NON_MPQ_DATA, ha->CompactBytesProcessed, ha->CompactTotalBytes);
        ByteOffset = 0;
        ByteCount = ha->UserDataPos;
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, ByteOffset, ByteCount);
    }

    // Write the MPQ user data (if any)
    if(nError == ERROR_SUCCESS && ha->MpqPos > ha->UserDataPos)
    {
        // At this point, we assume that the user data size is equal
        // to pUserData->dwHeaderOffs.
        // If this assumption doesn't work, then we have an unknown version of MPQ
        ByteOffset = ha->UserDataPos;
        ByteCount = ha->MpqPos - ha->UserDataPos;
        assert(ha->pUserData != NULL);
        assert(ha->pUserData->dwHeaderOffs == ByteCount);
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, ByteOffset, ByteCount);
    }

    // Write the MPQ header
    if(nError == ERROR_SUCCESS)
    {
        TMPQHeader SaveMpqHeader;

        // Write a byte-swapped copy of the header to the file
        // (swaps are no-ops on little-endian builds)
        memcpy(&SaveMpqHeader, ha->pHeader, ha->pHeader->dwHeaderSize);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_1);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_2);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_3);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_4);
        if(!FileStream_Write(pTempStream, NULL, &SaveMpqHeader, ha->pHeader->dwHeaderSize))
            nError = GetLastError();

        // Update the progress
        ha->CompactBytesProcessed += ha->pHeader->dwHeaderSize;
    }

    // Now copy all files
    if(nError == ERROR_SUCCESS)
        nError = CopyMpqFiles(ha, pFileKeys, pTempStream);

    // Defragment the file table
    if(nError == ERROR_SUCCESS)
        nError = RebuildFileTable(ha, ha->pHeader->dwHashTableSize, ha->dwMaxFileCount);

    // We also need to rebuild the HET table, if any
    if(nError == ERROR_SUCCESS)
    {
        // Invalidate (listfile) and (attributes)
        InvalidateInternalFiles(ha);

        // Rebuild the HET table, if we have any
        if(ha->pHetTable != NULL)
            nError = RebuildHetTable(ha);
    }

    // If succeeded, switch the streams
    if(nError == ERROR_SUCCESS)
    {
        // On success, the temp stream becomes the archive stream and must
        // not be closed below
        if(FileStream_Replace(ha->pStream, pTempStream))
            pTempStream = NULL;
        else
            nError = ERROR_CAN_NOT_COMPLETE;
    }

    // If all succeeded, save the MPQ tables
    if(nError == ERROR_SUCCESS)
    {
        //
        // Note: We don't recalculate position of the MPQ tables at this point.
        // SaveMPQTables does it automatically.
        //

        nError = SaveMPQTables(ha);
        if(nError == ERROR_SUCCESS && ha->pfnCompactCB != NULL)
        {
            ha->CompactBytesProcessed += (ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
            ha->CompactBytesProcessed += (ha->pHeader->dwBlockTableSize * sizeof(TMPQBlock));
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_CLOSING_ARCHIVE, ha->CompactBytesProcessed, ha->CompactTotalBytes);
        }
    }

    // Cleanup and return
    if(pTempStream != NULL)
        FileStream_Close(pTempStream);
    if(pFileKeys != NULL)
        STORM_FREE(pFileKeys);
    if(nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
/* Copies all file sectors of one file into another (compacted) archive.
 *
 * Sectors are copied raw, without recompression; only the position-dependent
 * MPQ encryption may be redone (when MPQ_FILE_FIX_KEY ties the key to the
 * file position, which changes during compaction).
 *
 * Parameters:
 *   ha         - source archive (read position state, progress callback)
 *   hf         - open file in the source archive (sector table, key, buffers)
 *   pNewStream - target stream; written sequentially at its current position
 *   MpqFilePos - MPQ file position in the new archive
 *
 * Returns ERROR_SUCCESS, or an error code on I/O, allocation failure,
 * or a size-consistency mismatch (ERROR_FILE_CORRUPT).
 */
static int CopyMpqFileSectors(
    TMPQArchive * ha,
    TMPQFile * hf,
    TFileStream * pNewStream,
    uint64_t MpqFilePos)                /* MPQ file position in the new archive */
{
    TFileEntry * pFileEntry = hf->pFileEntry;
    uint64_t RawFilePos;                /* Used for calculating sector offset in the old MPQ archive */
    uint32_t dwBytesToCopy = pFileEntry->dwCmpSize;
    uint32_t dwPatchSize = 0;           /* Size of patch header */
    uint32_t dwFileKey1 = 0;            /* File key used for decryption */
    uint32_t dwFileKey2 = 0;            /* File key used for encryption */
    uint32_t dwCmpSize = 0;             /* Compressed file size, including patch header */
    int nError = ERROR_SUCCESS;

    /* Resolve decryption keys. Note that the file key given */
    /* in the TMPQFile structure also includes the key adjustment */
    if(nError == ERROR_SUCCESS && (pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED))
    {
        dwFileKey2 = dwFileKey1 = hf->dwFileKey;
        if(pFileEntry->dwFlags & MPQ_FILE_FIX_KEY)
        {
            /* Undo the old position adjustment, then re-apply it for the new position */
            dwFileKey2 = (dwFileKey1 ^ pFileEntry->dwFileSize) - (uint32_t)pFileEntry->ByteOffset;
            dwFileKey2 = (dwFileKey2 + (uint32_t)MpqFilePos) ^ pFileEntry->dwFileSize;
        }
    }

    /* If we have to save patch header, do it */
    if(nError == ERROR_SUCCESS && hf->pPatchInfo != NULL)
    {
        /* Only the first three uint32 members are byte-swapped */
        BSWAP_ARRAY32_UNSIGNED(hf->pPatchInfo, sizeof(uint32_t) * 3);
        if(!FileStream_Write(pNewStream, NULL, hf->pPatchInfo, hf->pPatchInfo->dwLength))
            nError = GetLastError();

        /* Save the size of the patch info */
        dwPatchSize = hf->pPatchInfo->dwLength;
    }

    /* If we have to save sector offset table, do it. */
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL)
    {
        /* SectorOffsets[0] is the byte size of the whole offset table */
        uint32_t * SectorOffsetsCopy = STORM_ALLOC(uint32_t, hf->SectorOffsets[0] / sizeof(uint32_t));
        uint32_t dwSectorOffsLen = hf->SectorOffsets[0];

        /* Sector offset tables only exist for compressed multi-sector files */
        assert((pFileEntry->dwFlags & MPQ_FILE_SINGLE_UNIT) == 0);
        assert(pFileEntry->dwFlags & MPQ_FILE_COMPRESS_MASK);

        if(SectorOffsetsCopy == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;

        /* Encrypt the secondary sector offset table and write it to the target file */
        if(nError == ERROR_SUCCESS)
        {
            /* Work on a copy: hf->SectorOffsets is still needed in plaintext
             * form by the sector-copy loop below */
            memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorOffsLen);
            if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
                EncryptMpqBlock(SectorOffsetsCopy, dwSectorOffsLen, dwFileKey2 - 1);

            BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorOffsLen);

            if(!FileStream_Write(pNewStream, NULL, SectorOffsetsCopy, dwSectorOffsLen))
                nError = GetLastError();

            dwBytesToCopy -= dwSectorOffsLen;
            dwCmpSize += dwSectorOffsLen;
        }

        /* Update compact progress */
        /* NOTE(review): the progress counter is advanced even if the
         * allocation/write above failed; harmless since the compaction
         * aborts anyway, but worth confirming. */
        if(ha->pfnCompactCB != NULL)
        {
            ha->CompactBytesProcessed += dwSectorOffsLen;
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
        }

        STORM_FREE(SectorOffsetsCopy);
    }

    /* Now we have to copy all file sectors. We do it without */
    /* recompression, because recompression is not necessary in this case */
    if(nError == ERROR_SUCCESS)
    {
        uint32_t dwSector;

        for(dwSector = 0; dwSector < hf->dwSectorCount; dwSector++)
        {
            uint32_t dwRawDataInSector = hf->dwSectorSize;
            uint32_t dwRawByteOffset = dwSector * hf->dwSectorSize;

            /* Fix the raw data length if the file is compressed */
            if(hf->SectorOffsets != NULL)
            {
                dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
                dwRawByteOffset = hf->SectorOffsets[dwSector];
            }

            /* Last sector: If there is not enough bytes remaining in the file, cut the raw size */
            if(dwRawDataInSector > dwBytesToCopy)
                dwRawDataInSector = dwBytesToCopy;

            /* Calculate the raw file offset of the file sector */
            RawFilePos = CalculateRawSectorOffset(hf, dwRawByteOffset);

            /* Read the file sector */
            if(!FileStream_Read(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            /* If necessary, re-encrypt the sector */
            /* Note: Recompression is not necessary here. Unlike encryption, */
            /* the compression does not depend on the position of the file in MPQ. */
            if((pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED) && dwFileKey1 != dwFileKey2)
            {
                /* Order matters: strip the outer ciphers (Serpent, Anubis),
                 * redo the position-dependent MPQ cipher with the new key,
                 * then re-apply the outer ciphers in reverse order. */
                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_SERPENT)
                    DecryptMpqBlockSerpent(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleSerpent));
                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_ANUBIS)
                    DecryptMpqBlockAnubis(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleAnubis));
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
                DecryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey1 + dwSector);
                EncryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey2 + dwSector);
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_ANUBIS)
                    EncryptMpqBlockAnubis(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleAnubis));
                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_SERPENT)
                    EncryptMpqBlockSerpent(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleSerpent));
            }

            /* Now write the sector back to the file */
            if(!FileStream_Write(pNewStream, NULL, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            /* Update compact progress */
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwRawDataInSector;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            /* Adjust byte counts */
            dwBytesToCopy -= dwRawDataInSector;
            dwCmpSize += dwRawDataInSector;
        }
    }

    /* Copy the sector CRCs, if any */
    /* Sector CRCs are always compressed (not imploded) and unencrypted */
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL && hf->SectorChksums != NULL)
    {
        uint32_t dwCrcLength;

        /* The CRC block occupies the extra slot after the last data sector */
        dwCrcLength = hf->SectorOffsets[hf->dwSectorCount + 1] - hf->SectorOffsets[hf->dwSectorCount];
        if(dwCrcLength != 0)
        {
            /* NULL position: continue reading/writing sequentially from the
             * position left by the sector-copy loop above */
            if(!FileStream_Read(ha->pStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            /* Update compact progress */
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwCrcLength;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            /* Size of the CRC block is also included in the compressed file size */
            dwBytesToCopy -= dwCrcLength;
            dwCmpSize += dwCrcLength;
        }
    }

    /* There might be extra data beyond sector checksum table
     * Sometimes, these data are even part of sector offset table
     * Examples:
     * 2012 - WoW\15354\locale-enGB.MPQ:DBFilesClient\SpellLevels.dbc
     * 2012 - WoW\15354\locale-enGB.MPQ:Interface\AddOns\Blizzard_AuctionUI\Blizzard_AuctionUI.xml
     */
    if(nError == ERROR_SUCCESS && dwBytesToCopy != 0)
    {
        unsigned char * pbExtraData;

        /* Allocate space for the extra data */
        pbExtraData = STORM_ALLOC(uint8_t, dwBytesToCopy);
        if(pbExtraData != NULL)
        {
            if(!FileStream_Read(ha->pStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            /* Include these extra data in the compressed size */
            dwCmpSize += dwBytesToCopy;
            STORM_FREE(pbExtraData);
        }
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    /* Write the MD5's of the raw file data, if needed */
    if(nError == ERROR_SUCCESS && ha->pHeader->dwRawChunkSize != 0)
    {
        nError = WriteMpqDataMD5(pNewStream,
                                 ha->MpqPos + MpqFilePos,
                                 pFileEntry->dwCmpSize,
                                 ha->pHeader->dwRawChunkSize);
    }

    /* Verify the number of bytes written */
    if(nError == ERROR_SUCCESS)
    {
        /* At this point, number of bytes written should be exactly
         * the same like the compressed file size. If it isn't,
         * there's something wrong (an unknown archive version, MPQ malformation, ...)
         *
         * Note: Diablo savegames have very weird layout, and the file "hero"
         * seems to have improper compressed size. Instead of real compressed size,
         * the "dwCmpSize" member of the block table entry contains
         * uncompressed size of file data + size of the sector table.
         * If we compact the archive, Diablo will refuse to load the game
         *
         * Note: Some patch files in WOW patches don't count the patch header
         * into compressed size
         */
        if(!(dwCmpSize <= pFileEntry->dwCmpSize && pFileEntry->dwCmpSize <= dwCmpSize + dwPatchSize))
        {
            nError = ERROR_FILE_CORRUPT;
            assert(0);
        }
    }

    return nError;
}
// Writes a chunk of data to a file being added to the archive (legacy
// TMPQBlock-based variant). May be called repeatedly; buffers are lazily
// allocated on the first call, and the sector offset table / checksums are
// finalized once the cumulative write position reaches the full file size.
//
// Returns ERROR_SUCCESS or an error code; on error, hf->bErrorOccured is set.
int SFileAddFile_Write(TMPQFile * hf, const void * pvData, DWORD dwSize, DWORD dwCompression)
{
    TMPQArchive * ha;
    TMPQBlock * pBlock;
    DWORD dwSectorPosLen = 0;
    int nError = ERROR_SUCCESS;

    // Don't bother if the caller gave us zero size
    if(pvData == NULL || dwSize == 0)
        return ERROR_SUCCESS;

    // Get pointer to the MPQ archive
    pBlock = hf->pBlock;
    ha = hf->ha;

    // Allocate file buffers. This runs only on the first call for this file
    // (hf->pbFileSector persists across calls).
    if(hf->pbFileSector == NULL)
    {
        nError = AllocateSectorBuffer(hf);
        if(nError != ERROR_SUCCESS)
        {
            hf->bErrorOccured = true;
            return nError;
        }

        // Allocate sector offsets
        if(hf->SectorOffsets == NULL)
        {
            nError = AllocateSectorOffsets(hf, false);
            if(nError != ERROR_SUCCESS)
            {
                hf->bErrorOccured = true;
                return nError;
            }
        }

        // Create array of sector checksums
        if(hf->SectorChksums == NULL && (pBlock->dwFlags & MPQ_FILE_SECTOR_CRC))
        {
            nError = AllocateSectorChecksums(hf, false);
            if(nError != ERROR_SUCCESS)
            {
                hf->bErrorOccured = true;
                return nError;
            }
        }

        // Pre-save the sector offset table, just to reserve space in the file.
        // Note that we dont need to swap the sector positions, nor encrypt the table
        // at the moment, as it will be written again after writing all file sectors.
        if(hf->SectorOffsets != NULL)
        {
            dwSectorPosLen = hf->dwSectorCount * sizeof(DWORD);
            if(!FileStream_Write(ha->pStream, &hf->RawFilePos, hf->SectorOffsets, dwSectorPosLen))
                nError = GetLastError();
            // The sector table counts toward the stored (compressed) size
            pBlock->dwCSize += dwSectorPosLen;
        }
    }

    // Write the MPQ data to the file
    if(nError == ERROR_SUCCESS)
        nError = WriteDataToMpqFile(ha, hf, (LPBYTE)pvData, dwSize, dwCompression);

    // If it succeeded and we wrote all the file data,
    // we need to re-save sector offset table
    if((nError == ERROR_SUCCESS) && (hf->dwFilePos >= pBlock->dwFSize))
    {
        // Finish calculating CRC32
        if(hf->pCrc32 != NULL)
            *hf->pCrc32 = hf->dwCrc32;

        // Finish calculating MD5
        if(hf->pMd5 != NULL)
            tommd5_done((hash_state *)hf->hctx, hf->pMd5->Value);

        // If we also have sector checksums, write them to the file
        // NOTE(review): the return values of WriteSectorChecksums and
        // WriteSectorOffsets are ignored here, so a failed final write would
        // go unreported — verify whether that is intentional.
        if(hf->SectorChksums != NULL)
            WriteSectorChecksums(hf);

        // Now write sector offsets to the file
        if(hf->SectorOffsets != NULL)
            WriteSectorOffsets(hf);
    }

    if(nError != ERROR_SUCCESS)
        hf->bErrorOccured = true;
    return nError;
}
// Prepares a TMPQFile write handle for adding a new file to the archive:
// adjusts flags for tiny files, writes the MPQ header if not yet written,
// finds/creates hash and block table entries, reserves MPQ space, computes
// the encryption key, and initializes the attributes (time/CRC32/MD5) slots.
//
// On success, *phf receives the write handle (also returned on failure, with
// hf->bErrorOccured set). Returns ERROR_SUCCESS or an error code.
int SFileAddFile_Init(
    TMPQArchive * ha,
    const char * szArchivedName,
    TMPQFileTime * pFT,
    DWORD dwFileSize,
    LCID lcLocale,
    DWORD dwFlags,
    TMPQFile ** phf)
{
    LARGE_INTEGER TempPos;                  // For various file offset calculations
    TMPQFile * hf = NULL;                   // File structure for newly added file
    int nError = ERROR_SUCCESS;

    //
    // Note: This is an internal function so no validity checks are done.
    // It is the caller's responsibility to make sure that no invalid
    // flags get to this point
    //

    // Adjust file flags for too-small files:
    // files under 4 bytes cannot be encrypted (the cipher works on DWORDs),
    // files under 32 bytes are not worth compressing or checksumming
    if(dwFileSize < 0x04)
        dwFlags &= ~(MPQ_FILE_ENCRYPTED | MPQ_FILE_FIX_KEY);
    if(dwFileSize < 0x20)
        dwFlags &= ~(MPQ_FILE_COMPRESSED | MPQ_FILE_SECTOR_CRC);

    // Allocate the TMPQFile entry for newly added file
    hf = CreateMpqFile(ha, szArchivedName);
    if(hf == NULL)
        nError = ERROR_NOT_ENOUGH_MEMORY;

    // If the MPQ header has not yet been written, do it now
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_NO_HEADER))
    {
        // Remember the header size before swapping
        DWORD dwBytesToWrite = ha->pHeader->dwHeaderSize;

        // Swap to file byte order, write, then swap back to native order
        BSWAP_TMPQHEADER(ha->pHeader);
        if(FileStream_Write(ha->pStream, &ha->MpqPos, ha->pHeader, dwBytesToWrite))
            ha->dwFlags &= ~MPQ_FLAG_NO_HEADER;
        else
            nError = GetLastError();
        BSWAP_TMPQHEADER(ha->pHeader);
    }

    if(nError == ERROR_SUCCESS)
    {
        // Check if the file already exists in the archive
        if((hf->pHash = GetHashEntryExact(ha, szArchivedName, lcLocale)) != NULL)
        {
            if(dwFlags & MPQ_FILE_REPLACEEXISTING)
            {
                // Reuse the existing block table entries
                hf->pBlockEx = ha->pExtBlockTable + hf->pHash->dwBlockIndex;
                hf->pBlock = ha->pBlockTable + hf->pHash->dwBlockIndex;
            }
            else
            {
                nError = ERROR_ALREADY_EXISTS;
                hf->pHash = NULL;
            }
        }

        if(nError == ERROR_SUCCESS && hf->pHash == NULL)
        {
            hf->pHash = FindFreeHashEntry(ha, szArchivedName);
            if(hf->pHash == NULL)
            {
                nError = ERROR_DISK_FULL;
            }
        }

        // Set the hash index
        // NOTE(review): on the error paths above hf->pHash may be NULL here,
        // making this pointer subtraction undefined; the result is unused
        // when nError != ERROR_SUCCESS, but this deserves a guard — confirm.
        hf->dwHashIndex = (DWORD)(hf->pHash - ha->pHashTable);
        hf->bIsWriteHandle = true;
    }

    // Find a free space in the MPQ, as well as free block table entry
    if(nError == ERROR_SUCCESS)
    {
        DWORD dwFreeBlock = FindFreeMpqSpace(ha, &hf->MpqFilePos);

        // Calculate the raw file offset
        hf->RawFilePos.QuadPart = ha->MpqPos.QuadPart + hf->MpqFilePos.QuadPart;

        // When format V1, the size of the archive cannot exceed 4 GB
        if(ha->pHeader->wFormatVersion == MPQ_FORMAT_VERSION_1)
        {
            TempPos.QuadPart = hf->MpqFilePos.QuadPart + dwFileSize;
            TempPos.QuadPart += ha->pHeader->dwHashTableSize * sizeof(TMPQHash);
            TempPos.QuadPart += ha->pHeader->dwBlockTableSize * sizeof(TMPQBlock);
            TempPos.QuadPart += ha->pHeader->dwBlockTableSize * sizeof(TMPQBlockEx);
            if(TempPos.HighPart != 0)
                nError = ERROR_DISK_FULL;
        }

        // If we didn't get a block table entry assigned from hash table, assign it now
        if(hf->pBlock == NULL)
        {
            // Note: dwFreeBlock can be greater than dwHashTableSize,
            // in case that block table is bigger than hash table
            if(dwFreeBlock != 0xFFFFFFFF)
            {
                hf->pBlockEx = ha->pExtBlockTable + dwFreeBlock;
                hf->pBlock = ha->pBlockTable + dwFreeBlock;
            }
            else
            {
                nError = ERROR_DISK_FULL;
            }
        }

        // Calculate the index to the block table
        hf->dwBlockIndex = (DWORD)(hf->pBlock - ha->pBlockTable);
    }

    // Create key for file encryption
    if(nError == ERROR_SUCCESS && (dwFlags & MPQ_FILE_ENCRYPTED))
    {
        // The key is derived from the plain file name (no path)
        szArchivedName = GetPlainMpqFileName(szArchivedName);
        hf->dwFileKey = DecryptFileKey(szArchivedName);
        if(dwFlags & MPQ_FILE_FIX_KEY)
            hf->dwFileKey = (hf->dwFileKey + hf->MpqFilePos.LowPart) ^ dwFileSize;
    }

    if(nError == ERROR_SUCCESS)
    {
        // Initialize the hash entry for the file
        hf->pHash->dwBlockIndex = hf->dwBlockIndex;
        hf->pHash->lcLocale = (USHORT)lcLocale;

        // Initialize the block table entry for the file
        hf->pBlockEx->wFilePosHigh = (USHORT)hf->MpqFilePos.HighPart;
        hf->pBlock->dwFilePos = hf->MpqFilePos.LowPart;
        hf->pBlock->dwFSize = dwFileSize;
        hf->pBlock->dwCSize = 0;
        hf->pBlock->dwFlags = dwFlags | MPQ_FILE_EXISTS;

        // Resolve CRC32 and MD5 entry for the file
        // Only do it when the MPQ archive has attributes
        if(ha->pAttributes != NULL)
        {
            hf->pFileTime = ha->pAttributes->pFileTime + hf->dwBlockIndex;
            hf->pCrc32 = ha->pAttributes->pCrc32 + hf->dwBlockIndex;
            hf->pMd5 = ha->pAttributes->pMd5 + hf->dwBlockIndex;

            // If the file has been overwritten, there still might be
            // stale entries in the attributes
            memset(hf->pFileTime, 0, sizeof(TMPQFileTime));
            memset(hf->pMd5, 0, sizeof(TMPQMD5));
            hf->pCrc32[0] = 0;

            // Initialize the file time, CRC32 and MD5
            assert(sizeof(hf->hctx) >= sizeof(hash_state));
            tommd5_init((hash_state *)hf->hctx);
            hf->dwCrc32 = crc32(0, Z_NULL, 0);

            // If the caller gave us a file time, use it.
            if(pFT != NULL)
                *hf->pFileTime = *pFT;
        }

        // Call the callback, if needed
        if(AddFileCB != NULL)
            AddFileCB(pvUserData, 0, hf->pBlock->dwFSize, false);
    }

    // If an error occured, remember it
    if(nError != ERROR_SUCCESS)
        hf->bErrorOccured = true;
    *phf = hf;
    return nError;
}
int SFileAddFile_Write(TMPQFile * hf, const void * pvData, DWORD dwSize, DWORD dwCompression) { TMPQArchive * ha; TFileEntry * pFileEntry; int nError = ERROR_SUCCESS; // Don't bother if the caller gave us zero size if(pvData == NULL || dwSize == 0) return ERROR_SUCCESS; // Get pointer to the MPQ archive pFileEntry = hf->pFileEntry; ha = hf->ha; // Allocate file buffers if(hf->pbFileSector == NULL) { ULONGLONG RawFilePos = hf->RawFilePos; // Allocate buffer for file sector hf->nAddFileError = nError = AllocateSectorBuffer(hf); if(nError != ERROR_SUCCESS) return nError; // Allocate patch info, if the data is patch if(hf->pPatchInfo == NULL && IsIncrementalPatchFile(pvData, dwSize, &hf->dwPatchedFileSize)) { // Set the MPQ_FILE_PATCH_FILE flag hf->pFileEntry->dwFlags |= MPQ_FILE_PATCH_FILE; // Allocate the patch info hf->nAddFileError = nError = AllocatePatchInfo(hf, false); if(nError != ERROR_SUCCESS) return nError; } // Allocate sector offsets if(hf->SectorOffsets == NULL) { hf->nAddFileError = nError = AllocateSectorOffsets(hf, false); if(nError != ERROR_SUCCESS) return nError; } // Create array of sector checksums if(hf->SectorChksums == NULL && (pFileEntry->dwFlags & MPQ_FILE_SECTOR_CRC)) { hf->nAddFileError = nError = AllocateSectorChecksums(hf, false); if(nError != ERROR_SUCCESS) return nError; } // Pre-save the patch info, if any if(hf->pPatchInfo != NULL) { if(!FileStream_Write(ha->pStream, &RawFilePos, hf->pPatchInfo, hf->pPatchInfo->dwLength)) nError = GetLastError(); pFileEntry->dwCmpSize += hf->pPatchInfo->dwLength; RawFilePos += hf->pPatchInfo->dwLength; } // Pre-save the sector offset table, just to reserve space in the file. // Note that we dont need to swap the sector positions, nor encrypt the table // at the moment, as it will be written again after writing all file sectors. 
if(hf->SectorOffsets != NULL) { if(!FileStream_Write(ha->pStream, &RawFilePos, hf->SectorOffsets, hf->SectorOffsets[0])) nError = GetLastError(); pFileEntry->dwCmpSize += hf->SectorOffsets[0]; RawFilePos += hf->SectorOffsets[0]; } } // Write the MPQ data to the file if(nError == ERROR_SUCCESS) { // Save the first sector compression to the file structure // Note that the entire first file sector will be compressed // by compression that was passed to the first call of SFileAddFile_Write if(hf->dwFilePos == 0) hf->dwCompression0 = dwCompression; // Write the data to the MPQ nError = WriteDataToMpqFile(ha, hf, (LPBYTE)pvData, dwSize, dwCompression); } // If it succeeded and we wrote all the file data, // we need to re-save sector offset table if(nError == ERROR_SUCCESS) { if(hf->dwFilePos >= pFileEntry->dwFileSize) { // Finish calculating CRC32 hf->pFileEntry->dwCrc32 = hf->dwCrc32; // Finish calculating MD5 md5_done((hash_state *)hf->hctx, hf->pFileEntry->md5); // If we also have sector checksums, write them to the file if(hf->SectorChksums != NULL) { nError = WriteSectorChecksums(hf); } // Now write patch info if(hf->pPatchInfo != NULL) { memcpy(hf->pPatchInfo->md5, hf->pFileEntry->md5, MD5_DIGEST_SIZE); hf->pPatchInfo->dwDataSize = hf->pFileEntry->dwFileSize; hf->pFileEntry->dwFileSize = hf->dwPatchedFileSize; nError = WritePatchInfo(hf); } // Now write sector offsets to the file if(hf->SectorOffsets != NULL) { nError = WriteSectorOffsets(hf); } // Write the MD5 hashes of each file chunk, if required if(ha->pHeader->dwRawChunkSize != 0) { nError = WriteMpqDataMD5(ha->pStream, ha->MpqPos + hf->pFileEntry->ByteOffset, hf->pFileEntry->dwCmpSize, ha->pHeader->dwRawChunkSize); } } } // Store the error code from the Write File operation hf->nAddFileError = nError; return nError; }
/* Compacts (defragments) the MPQ archive: copies any non-MPQ leading data,
 * the user data block, the header and all known files into a temporary file,
 * then atomically replaces the original stream with the compacted one.
 *
 * Returns nonzero on success; on failure returns 0 and sets the last error.
 * All file keys must be resolvable, otherwise compaction fails. */
int EXPORT_SYMBOL SFileCompactArchive(void * hMpq, const char * szListFile, int bReserved)
{
    TFileStream * pTempStream = NULL;
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    uint64_t ByteOffset;
    uint64_t ByteCount;
    uint32_t * pFileKeys = NULL;
    char szTempFile[1024] = "";
    char * szTemp = NULL;
    int nError = ERROR_SUCCESS;

    /* Test the valid parameters */
    if(!IsValidMpqHandle(hMpq))
        nError = ERROR_INVALID_HANDLE;

    /* BUGFIX: Only dereference "ha" after the handle has been validated.
     * The original code read ha->dwFlags even when hMpq was invalid
     * (possibly NULL), which is a potential null-pointer dereference. */
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_READ_ONLY))
        nError = ERROR_ACCESS_DENIED;

    /* If the MPQ is changed at this moment, we have to flush the archive */
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_CHANGED))
    {
        SFileFlushArchive(hMpq);
    }

    /* Create the table with file keys */
    if(nError == ERROR_SUCCESS)
    {
        if((pFileKeys = STORM_ALLOC(uint32_t, ha->dwFileTableSize)) != NULL)
            memset(pFileKeys, 0, sizeof(uint32_t) * ha->dwFileTableSize);
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    /* First of all, we have to check whether we are able to decrypt all files. */
    /* If not, sorry, but the archive cannot be compacted. */
    if(nError == ERROR_SUCCESS)
    {
        /* Initialize the progress variables for compact callback */
        FileStream_GetSize(ha->pStream, &(ha->CompactTotalBytes));
        ha->CompactBytesProcessed = 0;
        nError = CheckIfAllKeysKnown(ha, szListFile, pFileKeys);
    }

    /* Get the temporary file name and create it */
    if(nError == ERROR_SUCCESS)
    {
        strcpy(szTempFile, FileStream_GetFileName(ha->pStream));
        if((szTemp = strrchr(szTempFile, '.')) != NULL)
            strcpy(szTemp + 1, "mp_");
        else
            strcat(szTempFile, "_");

        pTempStream = FileStream_CreateFile(szTempFile, STREAM_PROVIDER_FLAT | BASE_PROVIDER_FILE);
        if(pTempStream == NULL)
            nError = GetLastError();
    }

    /* Write the data before MPQ user data (if any) */
    if(nError == ERROR_SUCCESS && ha->UserDataPos != 0)
    {
        /* Inform the application about the progress */
        if(ha->pfnCompactCB != NULL)
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COPYING_NON_MPQ_DATA, ha->CompactBytesProcessed, ha->CompactTotalBytes);

        ByteOffset = 0;
        ByteCount = ha->UserDataPos;
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, &ByteOffset, ByteCount);
    }

    /* Write the MPQ user data (if any) */
    if(nError == ERROR_SUCCESS && ha->MpqPos > ha->UserDataPos)
    {
        /* At this point, we assume that the user data size is equal */
        /* to pUserData->dwHeaderOffs. */
        /* If this assumption doesn't work, then we have an unknown version of MPQ */
        ByteOffset = ha->UserDataPos;
        ByteCount = ha->MpqPos - ha->UserDataPos;

        assert(ha->pUserData != NULL);
        assert(ha->pUserData->dwHeaderOffs == ByteCount);
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, &ByteOffset, ByteCount);
    }

    /* Write the MPQ header */
    if(nError == ERROR_SUCCESS)
    {
        TMPQHeader SaveMpqHeader;

        /* Write a byte-swapped copy of the header (no-op on little-endian) */
        memcpy(&SaveMpqHeader, ha->pHeader, ha->pHeader->dwHeaderSize);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_1);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_2);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_3);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_4);
        if(!FileStream_Write(pTempStream, NULL, &SaveMpqHeader, ha->pHeader->dwHeaderSize))
            nError = GetLastError();

        /* Update the progress */
        ha->CompactBytesProcessed += ha->pHeader->dwHeaderSize;
    }

    /* Now copy all files */
    if(nError == ERROR_SUCCESS)
        nError = CopyMpqFiles(ha, pFileKeys, pTempStream);

    /* If succeeded, switch the streams */
    if(nError == ERROR_SUCCESS)
    {
        /* Mark the archive changed so the tables get saved on close */
        ha->dwFlags |= MPQ_FLAG_CHANGED;
        if(FileStream_Replace(ha->pStream, pTempStream))
            pTempStream = NULL;
        else
            nError = ERROR_CAN_NOT_COMPLETE;
    }

    /* Final user notification */
    if(nError == ERROR_SUCCESS && ha->pfnCompactCB != NULL)
    {
        ha->CompactBytesProcessed += (ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
        ha->CompactBytesProcessed += (ha->dwFileTableSize * sizeof(TMPQBlock));
        ha->pfnCompactCB(ha->pvCompactUserData, CCB_CLOSING_ARCHIVE, ha->CompactBytesProcessed, ha->CompactTotalBytes);
    }

    /* Cleanup and return */
    if(pTempStream != NULL)
        FileStream_Close(pTempStream);
    if(pFileKeys != NULL)
        STORM_FREE(pFileKeys);
    if(nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
int ExtractLocaleFile(int MPQId, const char * szArchivedFile, const char * szFileName) { HANDLE hFile = NULL; // Archived file handle TFileStream* handle = NULL; // Disk file handle int nError = ERROR_SUCCESS; // Result value // Open a file in the archive, e.g. "data\global\music\Act1\tristram.wav" if(nError == ERROR_SUCCESS) { if(!SFileOpenFileEx(localeMPQ[MPQId], szArchivedFile, SFILE_OPEN_PATCHED_FILE, &hFile)) nError = GetLastError(); } // Create the target file if(nError == ERROR_SUCCESS) { //handle = CreateFile(szFileName, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, 0, NULL); //if(handle == INVALID_HANDLE_VALUE) // nError = GetLastError(); handle = FileStream_CreateFile(szFileName); if(handle == NULL) nError = GetLastError(); } /* // Get the size of the full patched file dwFileSize = SFileGetFileSize(hFile, NULL); if(dwFileSize != 0) { // Allocate space for the full file pbFullFile = new BYTE[dwFileSize]; if(pbFullFile != NULL) { if(!SFileReadFile(hFile, pbFullFile, dwFileSize)) { nError = GetLastError(); printf("Failed to read full patched file data \"%s\"\n", szFileName); } if(nError == ERROR_SUCCESS) { strcpy(szLocFileName, MAKE_PATH("Work//")); strcat(szLocFileName, GetPlainName(szFileName)); pStream = FileStream_CreateFile(szLocFileName); if(pStream != NULL) { FileStream_Write(pStream, NULL, pbFullFile, dwFileSize); FileStream_Close(pStream); } } delete [] pbFullFile; } } */ // Read the file from the archive if(nError == ERROR_SUCCESS) { // Get the size of the full patched file DWORD dwFileSize = SFileGetFileSize(hFile, NULL); if(dwFileSize != 0) { // Allocate space for the full file BYTE * pbFullFile = new BYTE[dwFileSize]; if(!SFileReadFile(hFile, pbFullFile, dwFileSize)) { nError = GetLastError(); printf("Failed to read full patched file data \"%s\"\n", szFileName); assert(false); } FileStream_Write(handle, NULL, pbFullFile, dwFileSize); delete [] pbFullFile; } } // Cleanup and exit if(handle != NULL) FileStream_Close(handle); if(hFile != NULL) 
SFileCloseFile(hFile); return nError; }
// Copies all file sectors of one file into another (compacted) archive.
//
// Sectors are copied raw, without recompression; only the position-dependent
// MPQ encryption may be redone (when MPQ_FILE_FIX_KEY ties the key to the
// file position, which changes during compaction). On success, the file
// entry's ByteOffset is updated to the file's position in the new archive.
//
// Returns ERROR_SUCCESS, or an error code on I/O, allocation failure,
// or a size-consistency mismatch (ERROR_FILE_CORRUPT).
static int CopyMpqFileSectors(
    TMPQArchive * ha,
    TMPQFile * hf,
    TFileStream * pNewStream)
{
    TFileEntry * pFileEntry = hf->pFileEntry;
    ULONGLONG RawFilePos;               // Used for calculating sector offset in the old MPQ archive
    ULONGLONG MpqFilePos;               // MPQ file position in the new archive
    DWORD dwBytesToCopy = pFileEntry->dwCmpSize;
    DWORD dwPatchSize = 0;              // Size of patch header
    DWORD dwFileKey1 = 0;               // File key used for decryption
    DWORD dwFileKey2 = 0;               // File key used for encryption
    DWORD dwCmpSize = 0;                // Compressed file size, including patch header
    int nError = ERROR_SUCCESS;

    // Remember the position in the destination file (relative to MPQ start)
    FileStream_GetPos(pNewStream, &MpqFilePos);
    MpqFilePos -= ha->MpqPos;

    // Resolve decryption keys. Note that the file key given
    // in the TMPQFile structure also includes the key adjustment
    if(nError == ERROR_SUCCESS && (pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED))
    {
        dwFileKey2 = dwFileKey1 = hf->dwFileKey;
        if(pFileEntry->dwFlags & MPQ_FILE_FIX_KEY)
        {
            // Undo the old position adjustment, then re-apply it for the new position
            dwFileKey2 = (dwFileKey1 ^ pFileEntry->dwFileSize) - (DWORD)pFileEntry->ByteOffset;
            dwFileKey2 = (dwFileKey2 + (DWORD)MpqFilePos) ^ pFileEntry->dwFileSize;
        }
    }

    // If we have to save patch header, do it
    if(nError == ERROR_SUCCESS && hf->pPatchInfo != NULL)
    {
        // Only the first three DWORD members are byte-swapped
        BSWAP_ARRAY32_UNSIGNED(hf->pPatchInfo, sizeof(DWORD) * 3);
        if(!FileStream_Write(pNewStream, NULL, hf->pPatchInfo, hf->pPatchInfo->dwLength))
            nError = GetLastError();

        // Save the size of the patch info
        dwPatchSize = hf->pPatchInfo->dwLength;
    }

    // If we have to save sector offset table, do it.
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL)
    {
        // SectorOffsets[0] is the byte size of the whole offset table
        DWORD * SectorOffsetsCopy = STORM_ALLOC(DWORD, hf->SectorOffsets[0] / sizeof(DWORD));
        DWORD dwSectorOffsLen = hf->SectorOffsets[0];

        // Sector offset tables only exist for compressed multi-sector files
        assert((pFileEntry->dwFlags & MPQ_FILE_SINGLE_UNIT) == 0);
        assert(pFileEntry->dwFlags & MPQ_FILE_COMPRESS_MASK);

        if(SectorOffsetsCopy == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;

        // Encrypt the secondary sector offset table and write it to the target file
        if(nError == ERROR_SUCCESS)
        {
            // Work on a copy: hf->SectorOffsets is still needed in plaintext
            // form by the sector-copy loop below
            memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorOffsLen);
            if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
                EncryptMpqBlock(SectorOffsetsCopy, dwSectorOffsLen, dwFileKey2 - 1);

            BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorOffsLen);

            if(!FileStream_Write(pNewStream, NULL, SectorOffsetsCopy, dwSectorOffsLen))
                nError = GetLastError();

            dwBytesToCopy -= dwSectorOffsLen;
            dwCmpSize += dwSectorOffsLen;
        }

        // Update compact progress
        // NOTE(review): the progress counter is advanced even if the
        // allocation/write above failed; harmless since compaction aborts.
        if(ha->pfnCompactCB != NULL)
        {
            ha->CompactBytesProcessed += dwSectorOffsLen;
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
        }

        STORM_FREE(SectorOffsetsCopy);
    }

    // Now we have to copy all file sectors. We do it without
    // recompression, because recompression is not necessary in this case
    if(nError == ERROR_SUCCESS)
    {
        for(DWORD dwSector = 0; dwSector < hf->dwSectorCount; dwSector++)
        {
            DWORD dwRawDataInSector = hf->dwSectorSize;
            DWORD dwRawByteOffset = dwSector * hf->dwSectorSize;

            // Fix the raw data length if the file is compressed
            if(hf->SectorOffsets != NULL)
            {
                dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
                dwRawByteOffset = hf->SectorOffsets[dwSector];
            }

            // Last sector: If there is not enough bytes remaining in the file, cut the raw size
            if(dwRawDataInSector > dwBytesToCopy)
                dwRawDataInSector = dwBytesToCopy;

            // Calculate the raw file offset of the file sector
            CalculateRawSectorOffset(RawFilePos, hf, dwRawByteOffset);

            // Read the file sector
            if(!FileStream_Read(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // If necessary, re-encrypt the sector
            // Note: Recompression is not necessary here. Unlike encryption,
            // the compression does not depend on the position of the file in MPQ.
            if((pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED) && dwFileKey1 != dwFileKey2)
            {
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
                DecryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey1 + dwSector);
                EncryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey2 + dwSector);
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
            }

            // Now write the sector back to the file
            if(!FileStream_Write(pNewStream, NULL, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // Update compact progress
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwRawDataInSector;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            // Adjust byte counts
            dwBytesToCopy -= dwRawDataInSector;
            dwCmpSize += dwRawDataInSector;
        }
    }

    // Copy the sector CRCs, if any
    // Sector CRCs are always compressed (not imploded) and unencrypted
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL && hf->SectorChksums != NULL)
    {
        DWORD dwCrcLength;

        // The CRC block occupies the extra slot after the last data sector
        dwCrcLength = hf->SectorOffsets[hf->dwSectorCount + 1] - hf->SectorOffsets[hf->dwSectorCount];
        if(dwCrcLength != 0)
        {
            // NULL position: continue sequentially from where the loop left off
            if(!FileStream_Read(ha->pStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            // Update compact progress
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwCrcLength;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            // Size of the CRC block is also included in the compressed file size
            dwBytesToCopy -= dwCrcLength;
            dwCmpSize += dwCrcLength;
        }
    }

    // There might be extra data beyond sector checksum table
    // Sometimes, these data are even part of sector offset table
    // Examples:
    // 2012 - WoW\15354\locale-enGB.MPQ:DBFilesClient\SpellLevels.dbc
    // 2012 - WoW\15354\locale-enGB.MPQ:Interface\AddOns\Blizzard_AuctionUI\Blizzard_AuctionUI.xml
    if(nError == ERROR_SUCCESS && dwBytesToCopy != 0)
    {
        LPBYTE pbExtraData;

        // Allocate space for the extra data
        pbExtraData = STORM_ALLOC(BYTE, dwBytesToCopy);
        if(pbExtraData != NULL)
        {
            if(!FileStream_Read(ha->pStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            // Include these extra data in the compressed size
            dwCmpSize += dwBytesToCopy;
            STORM_FREE(pbExtraData);
        }
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Write the MD5's of the raw file data, if needed
    if(nError == ERROR_SUCCESS && ha->pHeader->dwRawChunkSize != 0)
    {
        nError = WriteMpqDataMD5(pNewStream,
                                 ha->MpqPos + MpqFilePos,
                                 pFileEntry->dwCmpSize,
                                 ha->pHeader->dwRawChunkSize);
    }

    // Update file position in the block table
    if(nError == ERROR_SUCCESS)
    {
        // At this point, number of bytes written should be exactly
        // the same like the compressed file size. If it isn't,
        // there's something wrong (an unknown archive version, MPQ malformation, ...)
        //
        // Note: Diablo savegames have very weird layout, and the file "hero"
        // seems to have improper compressed size. Instead of real compressed size,
        // the "dwCmpSize" member of the block table entry contains
        // uncompressed size of file data + size of the sector table.
        // If we compact the archive, Diablo will refuse to load the game
        //
        // Note: Some patch files in WOW patches don't count the patch header
        // into compressed size
        //
        if(dwCmpSize <= pFileEntry->dwCmpSize && pFileEntry->dwCmpSize <= dwCmpSize + dwPatchSize)
        {
            // Note: DO NOT update the compressed size in the file entry, no matter how bad it is.
            pFileEntry->ByteOffset = MpqFilePos;
        }
        else
        {
            nError = ERROR_FILE_CORRUPT;
            assert(false);
        }
    }

    return nError;
}