bool WINAPI SFileCreateArchive(const TCHAR * szMpqName, DWORD dwFlags, DWORD dwMaxFileCount, HANDLE * phMpq)
{
    TFileStream * pStream = NULL;           // File stream
    TMPQArchive * ha = NULL;                // MPQ archive handle
    ULONGLONG MpqPos = 0;                   // Position of MPQ header in the file
    HANDLE hMpq = NULL;
    USHORT wFormatVersion = MPQ_FORMAT_VERSION_1;
    DWORD dwBlockTableSize = 0;             // Initial block table size
    DWORD dwHashTableSize = 0;
    int nError = ERROR_SUCCESS;

    // Check the parameters, if they are valid
    if(szMpqName == NULL || *szMpqName == 0 || phMpq == NULL)
    {
        SetLastError(ERROR_INVALID_PARAMETER);
        return false;
    }

    // One time initialization of MPQ cryptography
    InitializeMpqCryptography();

    // Verify whether the file already exists and is an MPQ archive.
    // If so, we refuse to overwrite it.
    if(SFileOpenArchive(szMpqName, 0, dwFlags, &hMpq))
    {
        SFileCloseArchive(hMpq);
        SetLastError(ERROR_ALREADY_EXISTS);
        return false;
    }

    //
    // At this point, we have to create the archive.
    // - If the file exists, convert it to an MPQ archive.
    // - If the file doesn't exist, create a new empty file.
    //

    pStream = FileStream_OpenFile(szMpqName, true);
    if(pStream == NULL)
    {
        pStream = FileStream_CreateFile(szMpqName);
        if(pStream == NULL)
            return false;
    }

    // Decide what format to use
    wFormatVersion = (USHORT)((dwFlags & MPQ_CREATE_ARCHIVE_VMASK) >> 16);
    if(wFormatVersion > MPQ_FORMAT_VERSION_4)
    {
        // Close the stream so the file handle is not leaked
        FileStream_Close(pStream);
        SetLastError(ERROR_INVALID_PARAMETER);
        return false;
    }

    // Increment the maximum number of files to leave space
    // for the (listfile) and the (attributes) file
    if(dwFlags & MPQ_CREATE_ATTRIBUTES)
        dwMaxFileCount++;
    dwMaxFileCount++;

    // Derive the hash table size from the maximum file count
    dwHashTableSize = GetHashTableSizeForFileCount(dwMaxFileCount);

    // Retrieve the file size and round it up to 0x200 bytes
    FileStream_GetSize(pStream, MpqPos);
    MpqPos = (MpqPos + 0x1FF) & (ULONGLONG)0xFFFFFFFFFFFFFE00ULL;
    if(!FileStream_SetSize(pStream, MpqPos))
        nError = GetLastError();

#ifdef _DEBUG    
    // Debug code, used for testing StormLib
//  dwBlockTableSize = dwHashTableSize * 2;
#endif

    // Create the archive handle
    if(nError == ERROR_SUCCESS)
    {
        if((ha = STORM_ALLOC(TMPQArchive, 1)) == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Fill the MPQ archive handle structure
    if(nError == ERROR_SUCCESS)
    {
        memset(ha, 0, sizeof(TMPQArchive));
        ha->pStream         = pStream;
        ha->dwSectorSize    = (wFormatVersion >= MPQ_FORMAT_VERSION_3) ? 0x4000 : 0x1000;
        ha->UserDataPos     = MpqPos;
        ha->MpqPos          = MpqPos;
        ha->pHeader         = (TMPQHeader *)ha->HeaderData;
        ha->dwMaxFileCount  = dwMaxFileCount;
        ha->dwFileTableSize = 0;
        ha->dwFileFlags1    = MPQ_FILE_ENCRYPTED | MPQ_FILE_COMPRESS |  MPQ_FILE_REPLACEEXISTING;
        ha->dwFileFlags2    = MPQ_FILE_ENCRYPTED | MPQ_FILE_COMPRESS |  MPQ_FILE_REPLACEEXISTING;
        ha->dwFlags         = 0;

        // Setup the attributes
        if(dwFlags & MPQ_CREATE_ATTRIBUTES)
            ha->dwAttrFlags = MPQ_ATTRIBUTE_CRC32 | MPQ_ATTRIBUTE_FILETIME | MPQ_ATTRIBUTE_MD5;
        pStream = NULL;
    }

    // Fill the MPQ header
    if(nError == ERROR_SUCCESS)
    {
        TMPQHeader * pHeader = ha->pHeader;

        // Fill the MPQ header
        memset(pHeader, 0, sizeof(ha->HeaderData));
        pHeader->dwID             = ID_MPQ;
        pHeader->dwHeaderSize     = MpqHeaderSizes[wFormatVersion];
        pHeader->dwArchiveSize    = pHeader->dwHeaderSize + dwHashTableSize * sizeof(TMPQHash);
        pHeader->wFormatVersion   = wFormatVersion;
        pHeader->wSectorSize      = GetSectorSizeShift(ha->dwSectorSize);
        pHeader->dwHashTablePos   = pHeader->dwHeaderSize;
        pHeader->dwHashTableSize  = dwHashTableSize;
        pHeader->dwBlockTablePos  = pHeader->dwHashTablePos + dwHashTableSize * sizeof(TMPQHash);
        pHeader->dwBlockTableSize = dwBlockTableSize;

        // For MPQ format 4 and newer, set the size of the raw data block
        // used for MD5 calculation
        if(wFormatVersion >= MPQ_FORMAT_VERSION_4)
            pHeader->dwRawChunkSize = 0x4000;

        // Write the naked MPQ header
        nError = WriteNakedMPQHeader(ha);

        // Remember that the (listfile) and (attributes) need to be saved
        ha->dwFlags |= MPQ_FLAG_CHANGED | MPQ_FLAG_INV_LISTFILE | MPQ_FLAG_INV_ATTRIBUTES;
    }

    // Create the initial HET table, if the caller requested MPQ format 3.0 or newer
    if(nError == ERROR_SUCCESS && wFormatVersion >= MPQ_FORMAT_VERSION_3)
    {
        ha->pHetTable = CreateHetTable(ha->dwMaxFileCount, 0x40, true);
        if(ha->pHetTable == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Create initial hash table
    if(nError == ERROR_SUCCESS)
    {
        nError = CreateHashTable(ha, dwHashTableSize);
    }

    // Create initial file table
    if(nError == ERROR_SUCCESS)
    {
        ha->pFileTable = STORM_ALLOC(TFileEntry, ha->dwMaxFileCount);
        if(ha->pFileTable != NULL)
            memset(ha->pFileTable, 0x00, sizeof(TFileEntry) * ha->dwMaxFileCount);
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Cleanup: if an error occurred, free all buffers and return
    if(nError != ERROR_SUCCESS)
    {
        FileStream_Close(pStream);
        FreeMPQArchive(ha);
        SetLastError(nError);
        ha = NULL;
    }
    
    // Return the values
    *phMpq = (HANDLE)ha;
    return (nError == ERROR_SUCCESS);
}
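
// --- Illustrative usage sketch (not from the original StormLib sources) ---
// Shows how SFileCreateArchive above might be called. MPQ_CREATE_ARCHIVE_V2 and
// SFileAddFileEx are taken from the public StormLib API; treat them as
// assumptions if your build differs. The wrapper name is hypothetical.
#include <StormLib.h>

static bool CreateExampleArchive(const TCHAR * szMpqName)
{
    HANDLE hMpq = NULL;

    // Request MPQ format 2, reserve the (attributes) file, allow up to 1000 files
    if(!SFileCreateArchive(szMpqName, MPQ_CREATE_ARCHIVE_V2 | MPQ_CREATE_ATTRIBUTES, 1000, &hMpq))
        return false;                       // GetLastError() holds the failure code

    // Files would be added here, e.g. with SFileAddFileEx(...)

    SFileCloseArchive(hMpq);
    return true;
}
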
// Copies all file sectors into another archive.
static int CopyMpqFileSectors(
    TMPQArchive * ha,
    TMPQFile * hf,
    TFileStream * pNewStream)
{
    TFileEntry * pFileEntry = hf->pFileEntry;
    ULONGLONG RawFilePos;               // Used for calculating sector offset in the old MPQ archive
    ULONGLONG MpqFilePos;               // MPQ file position in the new archive
    DWORD dwBytesToCopy = pFileEntry->dwCmpSize;
    DWORD dwPatchSize = 0;              // Size of patch header
    DWORD dwFileKey1 = 0;               // File key used for decryption
    DWORD dwFileKey2 = 0;               // File key used for encryption
    DWORD dwCmpSize = 0;                // Compressed file size, including patch header
    int nError = ERROR_SUCCESS;

    // Remember the position in the destination file
    FileStream_GetPos(pNewStream, &MpqFilePos);
    MpqFilePos -= ha->MpqPos;

    // Resolve decryption keys. Note that the file key given 
    // in the TMPQFile structure also includes the key adjustment
    if(nError == ERROR_SUCCESS && (pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED))
    {
        dwFileKey2 = dwFileKey1 = hf->dwFileKey;
        if(pFileEntry->dwFlags & MPQ_FILE_FIX_KEY)
        {
            dwFileKey2 = (dwFileKey1 ^ pFileEntry->dwFileSize) - (DWORD)pFileEntry->ByteOffset;
            dwFileKey2 = (dwFileKey2 + (DWORD)MpqFilePos) ^ pFileEntry->dwFileSize;
        }
    }

    // If we have to save patch header, do it
    if(nError == ERROR_SUCCESS && hf->pPatchInfo != NULL)
    {
        BSWAP_ARRAY32_UNSIGNED(hf->pPatchInfo, sizeof(DWORD) * 3);
        if(!FileStream_Write(pNewStream, NULL, hf->pPatchInfo, hf->pPatchInfo->dwLength))
            nError = GetLastError();

        // Save the size of the patch info
        dwPatchSize = hf->pPatchInfo->dwLength;
    }

    // If we have to save sector offset table, do it.
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL)
    {
        DWORD * SectorOffsetsCopy = STORM_ALLOC(DWORD, hf->SectorOffsets[0] / sizeof(DWORD));
        DWORD dwSectorOffsLen = hf->SectorOffsets[0];

        assert((pFileEntry->dwFlags & MPQ_FILE_SINGLE_UNIT) == 0);
        assert(pFileEntry->dwFlags & MPQ_FILE_COMPRESS_MASK);

        if(SectorOffsetsCopy == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;

        // Encrypt the secondary sector offset table and write it to the target file
        if(nError == ERROR_SUCCESS)
        {
            memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorOffsLen);
            if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
                EncryptMpqBlock(SectorOffsetsCopy, dwSectorOffsLen, dwFileKey2 - 1);

            BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorOffsLen);
            if(!FileStream_Write(pNewStream, NULL, SectorOffsetsCopy, dwSectorOffsLen))
                nError = GetLastError();

            dwBytesToCopy -= dwSectorOffsLen;
            dwCmpSize += dwSectorOffsLen;
        }

        // Update compact progress
        if(ha->pfnCompactCB != NULL)
        {
            ha->CompactBytesProcessed += dwSectorOffsLen;
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
        }

        STORM_FREE(SectorOffsetsCopy);
    }

    // Now we have to copy all file sectors. We do it without
    // recompression, because recompression is not necessary in this case
    if(nError == ERROR_SUCCESS)
    {
        for(DWORD dwSector = 0; dwSector < hf->dwSectorCount; dwSector++)
        {
            DWORD dwRawDataInSector = hf->dwSectorSize;
            DWORD dwRawByteOffset = dwSector * hf->dwSectorSize;

            // Fix the raw data length if the file is compressed
            if(hf->SectorOffsets != NULL)
            {
                dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
                dwRawByteOffset = hf->SectorOffsets[dwSector];
            }

            // Last sector: If there are not enough bytes remaining in the file, cut the raw size
            if(dwRawDataInSector > dwBytesToCopy)
                dwRawDataInSector = dwBytesToCopy;

            // Calculate the raw file offset of the file sector
            CalculateRawSectorOffset(RawFilePos, hf, dwRawByteOffset);
            
            // Read the file sector
            if(!FileStream_Read(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // If necessary, re-encrypt the sector
            // Note: Recompression is not necessary here. Unlike encryption, 
            // the compression does not depend on the position of the file in MPQ.
            if((pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED) && dwFileKey1 != dwFileKey2)
            {
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
                DecryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey1 + dwSector);
                EncryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey2 + dwSector);
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
            }

            // Now write the sector back to the file
            if(!FileStream_Write(pNewStream, NULL, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // Update compact progress
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwRawDataInSector;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            // Adjust byte counts
            dwBytesToCopy -= dwRawDataInSector;
            dwCmpSize += dwRawDataInSector;
        }
    }

    // Copy the sector CRCs, if any
    // Sector CRCs are always compressed (not imploded) and unencrypted
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL && hf->SectorChksums != NULL)
    {
        DWORD dwCrcLength;

        dwCrcLength = hf->SectorOffsets[hf->dwSectorCount + 1] - hf->SectorOffsets[hf->dwSectorCount];
        if(dwCrcLength != 0)
        {
            if(!FileStream_Read(ha->pStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            // Update compact progress
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwCrcLength;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            // Size of the CRC block is also included in the compressed file size
            dwBytesToCopy -= dwCrcLength;
            dwCmpSize += dwCrcLength;
        }
    }

    // There might be extra data beyond the sector checksum table.
    // Sometimes, this data is even part of the sector offset table.
    // Examples:
    // 2012 - WoW\15354\locale-enGB.MPQ:DBFilesClient\SpellLevels.dbc
    // 2012 - WoW\15354\locale-enGB.MPQ:Interface\AddOns\Blizzard_AuctionUI\Blizzard_AuctionUI.xml
    if(nError == ERROR_SUCCESS && dwBytesToCopy != 0)
    {
        LPBYTE pbExtraData;

        // Allocate space for the extra data
        pbExtraData = STORM_ALLOC(BYTE, dwBytesToCopy);
        if(pbExtraData != NULL)
        {
            if(!FileStream_Read(ha->pStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            // Include these extra data in the compressed size
            dwCmpSize += dwBytesToCopy;
            STORM_FREE(pbExtraData);
        }
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Write the MD5's of the raw file data, if needed
    if(nError == ERROR_SUCCESS && ha->pHeader->dwRawChunkSize != 0)
    {
        nError = WriteMpqDataMD5(pNewStream, 
                                 ha->MpqPos + MpqFilePos,
                                 pFileEntry->dwCmpSize,
                                 ha->pHeader->dwRawChunkSize);
    }

    // Update file position in the block table
    if(nError == ERROR_SUCCESS)
    {
        // At this point, the number of bytes written should be exactly
        // the same as the compressed file size. If it isn't,
        // something is wrong (an unknown archive version, MPQ malformation, ...)
        // 
        // Note: Diablo savegames have a very weird layout, and the file "hero"
        // seems to have an improper compressed size. Instead of the real compressed size,
        // the "dwCmpSize" member of the block table entry contains the
        // uncompressed size of the file data + the size of the sector table.
        // If we compact the archive, Diablo will refuse to load the game.
        //
        // Note: Some patch files in WoW patches don't count the patch header
        // into the compressed size
        //

        if(dwCmpSize <= pFileEntry->dwCmpSize && pFileEntry->dwCmpSize <= dwCmpSize + dwPatchSize)
        {
            // Note: DO NOT update the compressed size in the file entry, no matter how bad it is.
            pFileEntry->ByteOffset = MpqFilePos;
        }
        else
        {
            nError = ERROR_FILE_CORRUPT;
            assert(false);
        }
    }

    return nError;
}
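
// --- Illustration (not from the original StormLib sources) ---
// A minimal sketch of the MPQ_FILE_FIX_KEY re-keying performed above: the key
// stored in hf->dwFileKey already includes the position-dependent adjustment,
// so the old adjustment is undone and the one for the new position is applied.
// The helper name is hypothetical and exists only for this illustration.
#include <StormLib.h>

static DWORD ReAdjustFixedFileKey(
    DWORD dwAdjustedKey,                    // Key valid at the old position
    DWORD dwFileSize,                       // Uncompressed file size
    DWORD dwOldByteOffset,                  // Old position of the file data in the MPQ
    DWORD dwNewByteOffset)                  // New position of the file data in the MPQ
{
    // Undo the old adjustment: adjusted = (base + offset) ^ size
    DWORD dwBaseKey = (dwAdjustedKey ^ dwFileSize) - dwOldByteOffset;

    // Apply the adjustment for the new position
    return (dwBaseKey + dwNewByteOffset) ^ dwFileSize;
}
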
bool WINAPI SFileOpenFileEx(HANDLE hMpq, const char * szFileName, DWORD dwSearchScope, HANDLE * phFile)
{
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    TFileEntry  * pFileEntry = NULL;
    TMPQFile    * hf = NULL;
    DWORD dwFileIndex = 0;
    bool bOpenByIndex = false;
    int nError = ERROR_SUCCESS;

    // Don't accept NULL pointer to file handle
    if(phFile == NULL)
        nError = ERROR_INVALID_PARAMETER;

    // Prepare the file opening
    if(nError == ERROR_SUCCESS)
    {
        switch(dwSearchScope)
        {
            case SFILE_OPEN_FROM_MPQ:
            case SFILE_OPEN_BASE_FILE:
                
                if(!IsValidMpqHandle(ha))
                {
                    nError = ERROR_INVALID_HANDLE;
                    break;
                }

                if(szFileName == NULL || *szFileName == 0)
                {
                    nError = ERROR_INVALID_PARAMETER;
                    break;
                }

                // Check the pseudo-file name
                if(IsPseudoFileName(szFileName, &dwFileIndex))
                {
                    pFileEntry = GetFileEntryByIndex(ha, dwFileIndex);
                    bOpenByIndex = true;
                    if(pFileEntry == NULL)
                        nError = ERROR_FILE_NOT_FOUND;
                }
                else
                {
                    // If this MPQ is a patched archive and patched access is requested,
                    // open the file as patched; otherwise open it from *this* MPQ
                    if(ha->haPatch == NULL || dwSearchScope == SFILE_OPEN_BASE_FILE)
                    {
                        // Open the file from *this* MPQ
                        pFileEntry = GetFileEntryLocale(ha, szFileName, lcFileLocale);
                        if(pFileEntry == NULL)
                            nError = ERROR_FILE_NOT_FOUND;
                    }
                    else
                    {
                        return OpenPatchedFile(hMpq, szFileName, 0, phFile);
                    }
                }
                break;

            case SFILE_OPEN_ANY_LOCALE:

                // This open mode is reserved for opening the MPQ internal listfile.
                // No argument validation is done. It tries to open the file with the neutral locale first,
                // then with any other locale available.
                pFileEntry = GetFileEntryAny(ha, szFileName);
                if(pFileEntry == NULL)
                    nError = ERROR_FILE_NOT_FOUND;
                break;

            case SFILE_OPEN_LOCAL_FILE:

                if(szFileName == NULL || *szFileName == 0)
                {
                    nError = ERROR_INVALID_PARAMETER;
                    break;
                }

                return OpenLocalFile(szFileName, phFile); 

            default:

                // Don't accept any other value
                nError = ERROR_INVALID_PARAMETER;
                break;
        }

        // Quick return if something failed
        if(nError != ERROR_SUCCESS)
        {
            SetLastError(nError);
            return false;
        }
    }

    // Check that the file has not already been deleted.
    if(nError == ERROR_SUCCESS)
    {
        if((pFileEntry->dwFlags & MPQ_FILE_EXISTS) == 0)
            nError = ERROR_FILE_NOT_FOUND;
        if(pFileEntry->dwFlags & ~MPQ_FILE_VALID_FLAGS)
            nError = ERROR_NOT_SUPPORTED;
    }

    // Allocate file handle
    if(nError == ERROR_SUCCESS)
    {
        if((hf = STORM_ALLOC(TMPQFile, 1)) == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Initialize file handle
    if(nError == ERROR_SUCCESS)
    {
        memset(hf, 0, sizeof(TMPQFile));
        hf->pFileEntry = pFileEntry;
        hf->dwMagic = ID_MPQ_FILE;
        hf->ha = ha;

        hf->MpqFilePos   = pFileEntry->ByteOffset;
        hf->RawFilePos   = ha->MpqPos + hf->MpqFilePos;
        hf->dwDataSize   = pFileEntry->dwFileSize;

        // If the MPQ has sector CRC checking enabled, enable it for the file
        if(ha->dwFlags & MPQ_FLAG_CHECK_SECTOR_CRC)
            hf->bCheckSectorCRCs = true;

        // If we know the real file name, copy it to the file entry
        if(bOpenByIndex == false)
        {
            // If there is no file name yet, allocate it
            AllocateFileName(pFileEntry, szFileName);

            // If the file is encrypted, we should detect the file key
            if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
            {
                hf->dwFileKey = DecryptFileKey(szFileName,
                                               pFileEntry->ByteOffset,
                                               pFileEntry->dwFileSize,
                                               pFileEntry->dwFlags);
            }
        }
        else
        {
            // Try to auto-detect the file name
            if(!SFileGetFileName(hf, NULL))
                nError = GetLastError();
        }
    }

    // If the file is actually a patch file, we have to load the patch file header
    if(nError == ERROR_SUCCESS && pFileEntry->dwFlags & MPQ_FILE_PATCH_FILE)
    {
        assert(hf->pPatchInfo == NULL);
        nError = AllocatePatchInfo(hf, true);
    }

    // Cleanup
    if(nError != ERROR_SUCCESS)
    {
        SetLastError(nError);
        FreeMPQFile(hf);
    }

    *phFile = hf;
    return (nError == ERROR_SUCCESS);
}
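
// --- Illustrative usage sketch (not from the original StormLib sources) ---
// Reads a whole file through SFileOpenFileEx above. SFileGetFileSize,
// SFileReadFile and SFileCloseFile are the usual StormLib companions of this
// call; the error handling style and the wrapper name are assumptions.
#include <StormLib.h>
#include <vector>

static bool ReadWholeFile(HANDLE hMpq, const char * szFileName, std::vector<BYTE> & FileData)
{
    HANDLE hFile = NULL;
    DWORD dwBytesRead = 0;

    if(!SFileOpenFileEx(hMpq, szFileName, SFILE_OPEN_FROM_MPQ, &hFile))
        return false;

    // The high 32 bits of the file size are ignored in this sketch
    DWORD dwFileSize = SFileGetFileSize(hFile, NULL);
    if(dwFileSize == SFILE_INVALID_SIZE)
    {
        SFileCloseFile(hFile);
        return false;
    }

    FileData.resize(dwFileSize);
    bool bResult = SFileReadFile(hFile, FileData.data(), dwFileSize, &dwBytesRead, NULL);
    SFileCloseFile(hFile);
    return bResult && dwBytesRead == dwFileSize;
}
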
bool WINAPI SFileSetMaxFileCount(HANDLE hMpq, DWORD dwMaxFileCount)
{
    TMPQHetTable * pOldHetTable = NULL;
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    TFileEntry * pOldFileTableEnd = ha->pFileTable + ha->dwFileTableSize;
    TFileEntry * pOldFileTable = NULL;
    TFileEntry * pOldFileEntry;
    TFileEntry * pFileEntry;
    TMPQHash * pOldHashTable = NULL;
    DWORD dwOldHashTableSize = 0;
    DWORD dwOldFileTableSize = 0;
    int nError = ERROR_SUCCESS;

    // Test the valid parameters
    if(!IsValidMpqHandle(ha))
        nError = ERROR_INVALID_HANDLE;
    if(ha->dwFlags & MPQ_FLAG_READ_ONLY)
        nError = ERROR_ACCESS_DENIED;

    // The new limit must not be lower than the current size of the file table
    if(nError == ERROR_SUCCESS && ha->dwFileTableSize > dwMaxFileCount)
        nError = ERROR_DISK_FULL;

    // ALL file names must be known in order to be able
    // to rebuild the hash table
    if(nError == ERROR_SUCCESS)
    {
        nError = CheckIfAllFilesKnown(ha, NULL, NULL);
    }

    // If the MPQ has a hash table, reallocate it with the new size
    if(nError == ERROR_SUCCESS && ha->pHashTable != NULL)
    {
        // Save parameters for the current hash table
        dwOldHashTableSize = ha->pHeader->dwHashTableSize;
        pOldHashTable = ha->pHashTable;

        // Allocate new hash table
        ha->pHeader->dwHashTableSize = GetHashTableSizeForFileCount(dwMaxFileCount);
        ha->pHashTable = STORM_ALLOC(TMPQHash, ha->pHeader->dwHashTableSize);
        if(ha->pHashTable != NULL)
            memset(ha->pHashTable, 0xFF, ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // If the MPQ has a HET table, allocate a new one as well
    if(nError == ERROR_SUCCESS && ha->pHetTable != NULL)
    {
        // Save the original HET table
        pOldHetTable = ha->pHetTable;

        // Create new one
        ha->pHetTable = CreateHetTable(dwMaxFileCount, 0x40, true);
        if(ha->pHetTable == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Now reallocate the file table
    if(nError == ERROR_SUCCESS)
    {
        // Save the current file table
        dwOldFileTableSize = ha->dwFileTableSize;
        pOldFileTable = ha->pFileTable;

        // Create new one
        ha->pFileTable = STORM_ALLOC(TFileEntry, dwMaxFileCount);
        if(ha->pFileTable != NULL)
            memset(ha->pFileTable, 0, dwMaxFileCount * sizeof(TFileEntry));
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Now we have to build both the classic hash table and the HET table.
    if(nError == ERROR_SUCCESS)
    {
        DWORD dwFileIndex = 0;
        DWORD dwHashIndex = 0;

        // Create new hash and HET entry for each file
        pFileEntry = ha->pFileTable;
        for(pOldFileEntry = pOldFileTable; pOldFileEntry < pOldFileTableEnd; pOldFileEntry++)
        {
            if(pOldFileEntry->dwFlags & MPQ_FILE_EXISTS)
            {
                // Copy the old file entry to the new one
                memcpy(pFileEntry, pOldFileEntry, sizeof(TFileEntry));
                assert(pFileEntry->szFileName != NULL);
                
                // Create new entry in the hash table
                if(ha->pHashTable != NULL)
                {
                    dwHashIndex = AllocateHashEntry(ha, pFileEntry);
                    if(dwHashIndex == HASH_ENTRY_FREE)
                    {
                        nError = ERROR_CAN_NOT_COMPLETE;
                        break;
                    }
                }

                // Create new entry in the HET table, if needed
                if(ha->pHetTable != NULL)
                {
                    dwHashIndex = AllocateHetEntry(ha, pFileEntry);
                    if(dwHashIndex == HASH_ENTRY_FREE)
                    {
                        nError = ERROR_CAN_NOT_COMPLETE;
                        break;
                    }
                }

                // Move to the next file entry in the new table
                pFileEntry++;
                dwFileIndex++;
            }
        }
    }

    // Mark the archive as changed
    // Note: We always have to rebuild the (attributes) file due to file table change
    if(nError == ERROR_SUCCESS)
    {
        ha->dwMaxFileCount = dwMaxFileCount;
        InvalidateInternalFiles(ha);
    }
    else
    {
        // Revert the hash table
        if(ha->pHashTable != NULL && pOldHashTable != NULL)
        {
            STORM_FREE(ha->pHashTable);
            ha->pHeader->dwHashTableSize = dwOldHashTableSize;
            ha->pHashTable = pOldHashTable;
        }

        // Revert the HET table
        if(ha->pHetTable != NULL && pOldHetTable != NULL)
        {
            FreeHetTable(ha->pHetTable);
            ha->pHetTable = pOldHetTable;
        }

        // Revert the file table
        if(pOldFileTable != NULL)
        {
            STORM_FREE(ha->pFileTable);
            ha->pFileTable = pOldFileTable;
        }

        SetLastError(nError);
    }

    // Return the result
    return (nError == ERROR_SUCCESS);
}
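
// --- Illustrative usage sketch (not from the original StormLib sources) ---
// Raises the file limit of a writable archive before a bulk import. As the code
// above shows, the new limit must not drop below the current file table size.
// SFileGetMaxFileCount is the standard StormLib getter for ha->dwMaxFileCount;
// the wrapper itself is hypothetical.
#include <StormLib.h>

static bool GrowArchive(HANDLE hMpq, DWORD dwExtraFiles)
{
    DWORD dwNewLimit = SFileGetMaxFileCount(hMpq) + dwExtraFiles;

    return SFileSetMaxFileCount(hMpq, dwNewLimit);
}
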
bool WINAPI SFileOpenArchive(
    const TCHAR * szMpqName,
    DWORD dwPriority,
    DWORD dwFlags,
    HANDLE * phMpq)
{
    TMPQUserData * pUserData;
    TFileStream * pStream = NULL;       // Open file stream
    TMPQArchive * ha = NULL;            // Archive handle
    TFileEntry * pFileEntry;
    ULONGLONG FileSize = 0;             // Size of the file
    LPBYTE pbHeaderBuffer = NULL;       // Buffer for searching MPQ header
    DWORD dwStreamFlags = (dwFlags & STREAM_FLAGS_MASK);
    bool bIsWarcraft3Map = false;
    int nError = ERROR_SUCCESS;   

    // Verify the parameters
    if(szMpqName == NULL || *szMpqName == 0 || phMpq == NULL)
    {
        SetLastError(ERROR_INVALID_PARAMETER);
        return false;
    }

    // One time initialization of MPQ cryptography
    InitializeMpqCryptography();
    dwPriority = dwPriority;                // The priority parameter is unused; the self-assignment silences the compiler warning

    // If not forcing MPQ v 1.0, also use file bitmap
    dwStreamFlags |= (dwFlags & MPQ_OPEN_FORCE_MPQ_V1) ? 0 : STREAM_FLAG_USE_BITMAP;

    // Open the MPQ archive file
    pStream = FileStream_OpenFile(szMpqName, dwStreamFlags);
    if(pStream == NULL)
        return false;

    // Check the file size. There must be at least 0x20 bytes
    if(nError == ERROR_SUCCESS)
    {
        FileStream_GetSize(pStream, &FileSize);
        if(FileSize < MPQ_HEADER_SIZE_V1)
            nError = ERROR_BAD_FORMAT;
    }

    // Allocate the MPQ handle
    if(nError == ERROR_SUCCESS)
    {
        if((ha = STORM_ALLOC(TMPQArchive, 1)) == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Allocate buffer for searching MPQ header
    if(nError == ERROR_SUCCESS)
    {
        pbHeaderBuffer = STORM_ALLOC(BYTE, HEADER_SEARCH_BUFFER_SIZE);
        if(pbHeaderBuffer == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Find the position of MPQ header
    if(nError == ERROR_SUCCESS)
    {
        ULONGLONG SearchOffset = 0;
        ULONGLONG EndOfSearch = FileSize;
        DWORD dwStrmFlags = 0;
        DWORD dwHeaderSize;
        DWORD dwHeaderID;
        bool bSearchComplete = false;

        memset(ha, 0, sizeof(TMPQArchive));
        ha->pfnHashString = HashStringSlash;
        ha->pStream = pStream;
        pStream = NULL;

        // Set the archive read only if the stream is read-only
        FileStream_GetFlags(ha->pStream, &dwStrmFlags);
        ha->dwFlags |= (dwStrmFlags & STREAM_FLAG_READ_ONLY) ? MPQ_FLAG_READ_ONLY : 0;

        // Also remember if we shall check sector CRCs when reading files
        ha->dwFlags |= (dwFlags & MPQ_OPEN_CHECK_SECTOR_CRC) ? MPQ_FLAG_CHECK_SECTOR_CRC : 0;

        // Also remember if this MPQ is a patch
        ha->dwFlags |= (dwFlags & MPQ_OPEN_PATCH) ? MPQ_FLAG_PATCH : 0;
       
        // Limit the header search to the first 128 MiB (0x08000000 bytes) of data
        if(EndOfSearch > 0x08000000)
            EndOfSearch = 0x08000000;

        // Find the offset of MPQ header within the file
        while(bSearchComplete == false && SearchOffset < EndOfSearch)
        {
            // Always read at least 0x1000 bytes for performance.
            // This is what Storm.dll (2002) does.
            DWORD dwBytesAvailable = HEADER_SEARCH_BUFFER_SIZE;
            DWORD dwInBufferOffset = 0;

            // Cut the bytes available, if needed
            if((FileSize - SearchOffset) < HEADER_SEARCH_BUFFER_SIZE)
                dwBytesAvailable = (DWORD)(FileSize - SearchOffset);

            // Read the potential MPQ header
            if(!FileStream_Read(ha->pStream, &SearchOffset, pbHeaderBuffer, dwBytesAvailable))
            {
                nError = GetLastError();
                break;
            }

            // There are AVI files from Warcraft III with 'MPQ' extension.
            if(SearchOffset == 0)
            {
                if(IsAviFile((DWORD *)pbHeaderBuffer))
                {
                    nError = ERROR_AVI_FILE;
                    break;
                }

                bIsWarcraft3Map = IsWarcraft3Map((DWORD *)pbHeaderBuffer);
            }

            // Search the header buffer
            while(dwInBufferOffset < dwBytesAvailable)
            {
                // Copy the data from the potential header buffer to the MPQ header
                memcpy(ha->HeaderData, pbHeaderBuffer + dwInBufferOffset, sizeof(ha->HeaderData));

                // If there is MPQ user data, process it
                // Note that Warcraft III does not check for user data, which is abused by many map protectors
                dwHeaderID = BSWAP_INT32_UNSIGNED(ha->HeaderData[0]);
                if(bIsWarcraft3Map == false && (dwFlags & MPQ_OPEN_FORCE_MPQ_V1) == 0)
                {
                    if(ha->pUserData == NULL && dwHeaderID == ID_MPQ_USERDATA)
                    {
                        // Verify whether this looks like valid user data
                        pUserData = IsValidMpqUserData(SearchOffset, FileSize, ha->HeaderData);
                        if(pUserData != NULL)
                        {
                            // Fill the user data header
                            ha->UserDataPos = SearchOffset;
                            ha->pUserData = &ha->UserData;
                            memcpy(ha->pUserData, pUserData, sizeof(TMPQUserData));

                            // Continue searching from that position
                            SearchOffset += ha->pUserData->dwHeaderOffs;
                            break;
                        }
                    }
                }

                // There must be an MPQ header signature. Note that STORM.dll from Warcraft III actually
                // tests the MPQ header size: it must be at least 0x20 bytes in order to load it.
                // This is abused by the Spazzler map protector. Note that the size check is not present
                // in Storm.dll v 1.00, so the Diablo I code would load the MPQ anyway.
                dwHeaderSize = BSWAP_INT32_UNSIGNED(ha->HeaderData[1]);
                if(dwHeaderID == ID_MPQ && dwHeaderSize >= MPQ_HEADER_SIZE_V1)
                {
                    // Now convert the header to version 4
                    nError = ConvertMpqHeaderToFormat4(ha, SearchOffset, FileSize, dwFlags);
                    bSearchComplete = true;
                    break;
                }

                // Check for MPK archives (Longwu Online - MPQ fork)
                if(dwHeaderID == ID_MPK)
                {
                    // Now convert the MPK header to MPQ Header version 4
                    nError = ConvertMpkHeaderToFormat4(ha, FileSize, dwFlags);
                    bSearchComplete = true;
                    break;
                }

                // If searching for the MPQ header is disabled, return an error
                if(dwFlags & MPQ_OPEN_NO_HEADER_SEARCH)
                {
                    nError = ERROR_NOT_SUPPORTED;
                    bSearchComplete = true;
                    break;
                }

                // Move the pointers
                SearchOffset += 0x200;
                dwInBufferOffset += 0x200;
            }
        }

        // Did we identify one of the supported headers?
        if(nError == ERROR_SUCCESS)
        {
            // Set the user data position to the MPQ header, if none
            if(ha->pUserData == NULL)
                ha->UserDataPos = SearchOffset;

            // Set the position of the MPQ header
            ha->pHeader  = (TMPQHeader *)ha->HeaderData;
            ha->MpqPos   = SearchOffset;
            ha->FileSize = FileSize;

            // Sector size must be nonzero.
            if(SearchOffset >= FileSize || ha->pHeader->wSectorSize == 0)
                nError = ERROR_BAD_FORMAT;
        }
    }

    // Fix table positions according to format
    if(nError == ERROR_SUCCESS)
    {
        // Dump the header
//      DumpMpqHeader(ha->pHeader);

        // W3x map protectors use the fact that War3's Storm.dll ignores the MPQ user data
        // and ignores the MPQ format version as well. The trick is to
        // fake MPQ format 2 with an improper hi-word position of the hash table and block table.
        // We can overcome such protectors by forcing the archive to be opened as MPQ v 1.0.
        if(dwFlags & MPQ_OPEN_FORCE_MPQ_V1)
        {
            ha->pHeader->wFormatVersion = MPQ_FORMAT_VERSION_1;
            ha->pHeader->dwHeaderSize = MPQ_HEADER_SIZE_V1;
            ha->dwFlags |= MPQ_FLAG_READ_ONLY;
            ha->pUserData = NULL;
        }

        // Either MPQ_OPEN_NO_LISTFILE or MPQ_OPEN_NO_ATTRIBUTES triggers read-only mode
        if(dwFlags & (MPQ_OPEN_NO_LISTFILE | MPQ_OPEN_NO_ATTRIBUTES))
            ha->dwFlags |= MPQ_FLAG_READ_ONLY;

        // Remember whether this is a Warcraft III map
        if(bIsWarcraft3Map)
            ha->dwFlags |= MPQ_FLAG_WAR3_MAP;

        // Set the size of file sector
        ha->dwSectorSize = (0x200 << ha->pHeader->wSectorSize);

        // Verify that none of the tables starts beyond the end of the file
        nError = VerifyMpqTablePositions(ha, FileSize);
    }

    // Read the hash table. Ignore the result, as hash table is no longer required
    // Read HET table. Ignore the result, as HET table is no longer required
    if(nError == ERROR_SUCCESS)
    {
        nError = LoadAnyHashTable(ha);
    }

    // Now, build the file table. It will be built by combining
    // the block table, BET table, hi-block table, (attributes) and (listfile).
    if(nError == ERROR_SUCCESS)
    {
        nError = BuildFileTable(ha);
    }

    // Load the internal listfile and include it to the file table
    if(nError == ERROR_SUCCESS && (dwFlags & MPQ_OPEN_NO_LISTFILE) == 0)
    {
        // Quick check for (listfile)
        pFileEntry = GetFileEntryLocale(ha, LISTFILE_NAME, LANG_NEUTRAL);
        if(pFileEntry != NULL)
        {
            // Ignore result of the operation. (listfile) is optional.
            SFileAddListFile((HANDLE)ha, NULL);
            ha->dwFileFlags1 = pFileEntry->dwFlags;
        }
    }

    // Load the "(attributes)" file and merge it to the file table
    if(nError == ERROR_SUCCESS && (dwFlags & MPQ_OPEN_NO_ATTRIBUTES) == 0 && (ha->dwFlags & MPQ_FLAG_BLOCK_TABLE_CUT) == 0)
    {
        // Quick check for (attributes)
        pFileEntry = GetFileEntryLocale(ha, ATTRIBUTES_NAME, LANG_NEUTRAL);
        if(pFileEntry != NULL)
        {
            // Ignore result of the operation. (attributes) is optional.
            SAttrLoadAttributes(ha);
            ha->dwFileFlags2 = pFileEntry->dwFlags;
        }
    }

    // Remember whether the archive has a weak signature. Only for MPQ format 1.0.
    if(nError == ERROR_SUCCESS)
    {
        // Quick check for (signature)
        pFileEntry = GetFileEntryLocale(ha, SIGNATURE_NAME, LANG_NEUTRAL);
        if(pFileEntry != NULL)
        {
            // Just remember that the archive is weak-signed
            assert((pFileEntry->dwFlags & MPQ_FILE_EXISTS) != 0);
            ha->dwFileFlags3 = pFileEntry->dwFlags;
        }

        // Finally, set the MPQ_FLAG_READ_ONLY if the MPQ was found malformed
        ha->dwFlags |= (ha->dwFlags & MPQ_FLAG_MALFORMED) ? MPQ_FLAG_READ_ONLY : 0;
    }

    // Cleanup and exit
    if(nError != ERROR_SUCCESS)
    {
        FileStream_Close(pStream);
        FreeArchiveHandle(ha);
        SetLastError(nError);
        ha = NULL;
    }

    // Free the header buffer
    if(pbHeaderBuffer != NULL)
        STORM_FREE(pbHeaderBuffer);
    if(phMpq != NULL)
        *phMpq = ha;
    return (nError == ERROR_SUCCESS);
}
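
// --- Illustrative usage sketch (not from the original StormLib sources) ---
// Opens an existing archive with flags handled by SFileOpenArchive above. Per
// the code above, MPQ_OPEN_NO_LISTFILE / MPQ_OPEN_NO_ATTRIBUTES also force the
// archive into read-only mode; the wrapper itself is hypothetical.
#include <StormLib.h>

static HANDLE OpenArchiveForReading(const TCHAR * szMpqName)
{
    HANDLE hMpq = NULL;

    if(!SFileOpenArchive(szMpqName, 0, MPQ_OPEN_NO_LISTFILE | MPQ_OPEN_NO_ATTRIBUTES, &hMpq))
        return NULL;                        // GetLastError() holds the failure code

    return hMpq;                            // Close with SFileCloseArchive() when done
}
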
/* Copies all file sectors into another archive. */
static int CopyMpqFileSectors(
    TMPQArchive * ha,
    TMPQFile * hf,
    TFileStream * pNewStream,
    uint64_t MpqFilePos)               /* MPQ file position in the new archive */
{
    TFileEntry * pFileEntry = hf->pFileEntry;
    uint64_t RawFilePos;               /* Used for calculating sector offset in the old MPQ archive */
    uint32_t dwBytesToCopy = pFileEntry->dwCmpSize;
    uint32_t dwPatchSize = 0;              /* Size of patch header */
    uint32_t dwFileKey1 = 0;               /* File key used for decryption */
    uint32_t dwFileKey2 = 0;               /* File key used for encryption */
    uint32_t dwCmpSize = 0;                /* Compressed file size, including patch header */
    int nError = ERROR_SUCCESS;

    /* Resolve decryption keys. Note that the file key given */
    /* in the TMPQFile structure also includes the key adjustment */
    if(nError == ERROR_SUCCESS && (pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED))
    {
        dwFileKey2 = dwFileKey1 = hf->dwFileKey;
        if(pFileEntry->dwFlags & MPQ_FILE_FIX_KEY)
        {
            dwFileKey2 = (dwFileKey1 ^ pFileEntry->dwFileSize) - (uint32_t)pFileEntry->ByteOffset;
            dwFileKey2 = (dwFileKey2 + (uint32_t)MpqFilePos) ^ pFileEntry->dwFileSize;
        }
    }

    /* If we have to save patch header, do it */
    if(nError == ERROR_SUCCESS && hf->pPatchInfo != NULL)
    {
        BSWAP_ARRAY32_UNSIGNED(hf->pPatchInfo, sizeof(uint32_t) * 3);
        if(!FileStream_Write(pNewStream, NULL, hf->pPatchInfo, hf->pPatchInfo->dwLength))
            nError = GetLastError();

        /* Save the size of the patch info */
        dwPatchSize = hf->pPatchInfo->dwLength;
    }

    /* If we have to save sector offset table, do it. */
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL)
    {
        uint32_t * SectorOffsetsCopy = STORM_ALLOC(uint32_t, hf->SectorOffsets[0] / sizeof(uint32_t));
        uint32_t dwSectorOffsLen = hf->SectorOffsets[0];

        assert((pFileEntry->dwFlags & MPQ_FILE_SINGLE_UNIT) == 0);
        assert(pFileEntry->dwFlags & MPQ_FILE_COMPRESS_MASK);

        if(SectorOffsetsCopy == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;

        /* Encrypt the secondary sector offset table and write it to the target file */
        if(nError == ERROR_SUCCESS)
        {
            memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorOffsLen);
            if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
                EncryptMpqBlock(SectorOffsetsCopy, dwSectorOffsLen, dwFileKey2 - 1);

            BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorOffsLen);
            if(!FileStream_Write(pNewStream, NULL, SectorOffsetsCopy, dwSectorOffsLen))
                nError = GetLastError();

            dwBytesToCopy -= dwSectorOffsLen;
            dwCmpSize += dwSectorOffsLen;
        }

        /* Update compact progress */
        if(ha->pfnCompactCB != NULL)
        {
            ha->CompactBytesProcessed += dwSectorOffsLen;
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
        }

        STORM_FREE(SectorOffsetsCopy);
    }

    /* Now we have to copy all file sectors. We do it without */
    /* recompression, because recompression is not necessary in this case */
    if(nError == ERROR_SUCCESS)
    {
        uint32_t dwSector;
        
        for(dwSector = 0; dwSector < hf->dwSectorCount; dwSector++)
        {
            uint32_t dwRawDataInSector = hf->dwSectorSize;
            uint32_t dwRawByteOffset = dwSector * hf->dwSectorSize;

            /* Fix the raw data length if the file is compressed */
            if(hf->SectorOffsets != NULL)
            {
                dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
                dwRawByteOffset = hf->SectorOffsets[dwSector];
            }

            /* Last sector: If there are not enough bytes remaining in the file, cut the raw size */
            if(dwRawDataInSector > dwBytesToCopy)
                dwRawDataInSector = dwBytesToCopy;

            /* Calculate the raw file offset of the file sector */
            RawFilePos = CalculateRawSectorOffset(hf, dwRawByteOffset);
            
            /* Read the file sector */
            if(!FileStream_Read(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            /* If necessary, re-encrypt the sector */
            /* Note: Recompression is not necessary here. Unlike encryption, */
            /* the compression does not depend on the position of the file in MPQ. */
            if((pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED) && dwFileKey1 != dwFileKey2)
            {
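                /* The Serpent and Anubis layers (when present) wrap the classic MPQ
                 * block cipher: peel them off first, re-key the MPQ cipher for the
                 * new position, then re-apply Anubis and Serpent on top. */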
                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_SERPENT)
                    DecryptMpqBlockSerpent(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleSerpent));

                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_ANUBIS)
                    DecryptMpqBlockAnubis(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleAnubis));
                
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
                DecryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey1 + dwSector);
                EncryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey2 + dwSector);
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
                
                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_ANUBIS)
                    EncryptMpqBlockAnubis(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleAnubis));

                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_SERPENT)
                    EncryptMpqBlockSerpent(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleSerpent));
            }

            /* Now write the sector back to the file */
            if(!FileStream_Write(pNewStream, NULL, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            /* Update compact progress */
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwRawDataInSector;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            /* Adjust byte counts */
            dwBytesToCopy -= dwRawDataInSector;
            dwCmpSize += dwRawDataInSector;
        }
    }

    /* Copy the sector CRCs, if any */
    /* Sector CRCs are always compressed (not imploded) and unencrypted */
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL && hf->SectorChksums != NULL)
    {
        uint32_t dwCrcLength;

        dwCrcLength = hf->SectorOffsets[hf->dwSectorCount + 1] - hf->SectorOffsets[hf->dwSectorCount];
        if(dwCrcLength != 0)
        {
            if(!FileStream_Read(ha->pStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            /* Update compact progress */
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwCrcLength;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            /* Size of the CRC block is also included in the compressed file size */
            dwBytesToCopy -= dwCrcLength;
            dwCmpSize += dwCrcLength;
        }
    }

    /* There might be extra data beyond the sector checksum table.
     * Sometimes, this data is even part of the sector offset table.
     * Examples:
     * 2012 - WoW\15354\locale-enGB.MPQ:DBFilesClient\SpellLevels.dbc
     * 2012 - WoW\15354\locale-enGB.MPQ:Interface\AddOns\Blizzard_AuctionUI\Blizzard_AuctionUI.xml
     */
    if(nError == ERROR_SUCCESS && dwBytesToCopy != 0)
    {
        unsigned char * pbExtraData;

        /* Allocate space for the extra data */
        pbExtraData = STORM_ALLOC(uint8_t, dwBytesToCopy);
        if(pbExtraData != NULL)
        {
            if(!FileStream_Read(ha->pStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            /* Include these extra data in the compressed size */
            dwCmpSize += dwBytesToCopy;
            STORM_FREE(pbExtraData);
        }
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    /* Write the MD5's of the raw file data, if needed */
    if(nError == ERROR_SUCCESS && ha->pHeader->dwRawChunkSize != 0)
    {
        nError = WriteMpqDataMD5(pNewStream, 
                                 ha->MpqPos + MpqFilePos,
                                 pFileEntry->dwCmpSize,
                                 ha->pHeader->dwRawChunkSize);
    }

    /* Verify the number of bytes written */
    if(nError == ERROR_SUCCESS)
    {
        /* At this point, the number of bytes written should be exactly
         * the same as the compressed file size. If it isn't,
         * something is wrong (an unknown archive version, MPQ malformation, ...)
         * 
         * Note: Diablo savegames have a very weird layout, and the file "hero"
         * seems to have an improper compressed size. Instead of the real compressed size,
         * the "dwCmpSize" member of the block table entry contains the
         * uncompressed size of the file data + the size of the sector table.
         * If we compact the archive, Diablo will refuse to load the game.
         *
         * Note: Some patch files in WoW patches don't count the patch header
         * into the compressed size
         */

        if(!(dwCmpSize <= pFileEntry->dwCmpSize && pFileEntry->dwCmpSize <= dwCmpSize + dwPatchSize))
        {
            nError = ERROR_FILE_CORRUPT;
            assert(0);
        }
    }

    return nError;
}
bool WINAPI SFileOpenArchive(
    const TCHAR * szMpqName,
    DWORD dwPriority,
    DWORD dwFlags,
    HANDLE * phMpq)
{
    TMPQUserData * pUserData;
    TFileStream * pStream = NULL;       // Open file stream
    TMPQArchive * ha = NULL;            // Archive handle
    TFileEntry * pFileEntry;
    ULONGLONG FileSize = 0;             // Size of the file
    int nError = ERROR_SUCCESS;   

    // Verify the parameters
    if(szMpqName == NULL || *szMpqName == 0 || phMpq == NULL)
        nError = ERROR_INVALID_PARAMETER;

    // One time initialization of MPQ cryptography
    InitializeMpqCryptography();
    dwPriority = dwPriority;                // The priority parameter is unused; the self-assignment silences the compiler warning

    // Open the MPQ archive file
    if(nError == ERROR_SUCCESS)
    {
        DWORD dwStreamFlags = (dwFlags & STREAM_FLAGS_MASK);

        // If not forcing MPQ v 1.0, also use file bitmap
        dwStreamFlags |= (dwFlags & MPQ_OPEN_FORCE_MPQ_V1) ? 0 : STREAM_FLAG_USE_BITMAP;

        // Initialize the stream
        pStream = FileStream_OpenFile(szMpqName, dwStreamFlags);
        if(pStream == NULL)
            nError = GetLastError();
    }

    // Check the file size. There must be at least 0x20 bytes
    if(nError == ERROR_SUCCESS)
    {
        FileStream_GetSize(pStream, &FileSize);
        if(FileSize < MPQ_HEADER_SIZE_V1)
            nError = ERROR_BAD_FORMAT;
    }

    // Allocate the MPQ handle
    if(nError == ERROR_SUCCESS)
    {
        if((ha = STORM_ALLOC(TMPQArchive, 1)) == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Initialize handle structure and allocate structure for MPQ header
    if(nError == ERROR_SUCCESS)
    {
        ULONGLONG SearchOffset = 0;
        DWORD dwStreamFlags = 0;
        DWORD dwHeaderSize;
        DWORD dwHeaderID;

        memset(ha, 0, sizeof(TMPQArchive));
        ha->pfnHashString = HashString;
        ha->pStream = pStream;
        pStream = NULL;

        // Set the archive read only if the stream is read-only
        FileStream_GetFlags(ha->pStream, &dwStreamFlags);
        ha->dwFlags |= (dwStreamFlags & STREAM_FLAG_READ_ONLY) ? MPQ_FLAG_READ_ONLY : 0;

        // Also remember if we shall check sector CRCs when reading files
        if(dwFlags & MPQ_OPEN_CHECK_SECTOR_CRC)
            ha->dwFlags |= MPQ_FLAG_CHECK_SECTOR_CRC;

        // Find the offset of MPQ header within the file
        while(SearchOffset < FileSize)
        {
            DWORD dwBytesAvailable = MPQ_HEADER_SIZE_V4;

            // Cut the bytes available, if needed
            if((FileSize - SearchOffset) < MPQ_HEADER_SIZE_V4)
                dwBytesAvailable = (DWORD)(FileSize - SearchOffset);

            // Read the potential MPQ header
            if(!FileStream_Read(ha->pStream, &SearchOffset, ha->HeaderData, dwBytesAvailable))
            {
                nError = GetLastError();
                break;
            }

            // There are AVI files from Warcraft III with 'MPQ' extension.
            if(SearchOffset == 0 && IsAviFile(ha->HeaderData))
            {
                nError = ERROR_AVI_FILE;
                break;
            }

            // If there is an MPQ user data signature, process it
            dwHeaderID = BSWAP_INT32_UNSIGNED(ha->HeaderData[0]);
            if(dwHeaderID == ID_MPQ_USERDATA && ha->pUserData == NULL && (dwFlags & MPQ_OPEN_FORCE_MPQ_V1) == 0)
            {
                // Verify whether this looks like valid user data
                pUserData = IsValidMpqUserData(SearchOffset, FileSize, ha->HeaderData);
                if(pUserData != NULL)
                {
                    // Fill the user data header
                    ha->UserDataPos = SearchOffset;
                    ha->pUserData = &ha->UserData;
                    memcpy(ha->pUserData, pUserData, sizeof(TMPQUserData));

                    // Continue searching from that position
                    SearchOffset += ha->pUserData->dwHeaderOffs;
                    continue;
                }
            }

            // There must be an MPQ header signature. Note that STORM.dll from Warcraft III actually
            // tests the MPQ header size: it must be at least 0x20 bytes in order to load it.
            // This is abused by the Spazzler map protector. Note that the size check is not present
            // in Storm.dll v 1.00, so the Diablo I code would load the MPQ anyway.
            dwHeaderSize = BSWAP_INT32_UNSIGNED(ha->HeaderData[1]);
            if(dwHeaderID == ID_MPQ && dwHeaderSize >= MPQ_HEADER_SIZE_V1)
            {
                // Now convert the header to version 4
                nError = ConvertMpqHeaderToFormat4(ha, SearchOffset, FileSize, dwFlags);
                break;
            }

            // Check for MPK archives (Longwu Online - MPQ fork)
            if(dwHeaderID == ID_MPK)
            {
                // Now convert the MPK header to MPQ Header version 4
                nError = ConvertMpkHeaderToFormat4(ha, FileSize, dwFlags);
                break;
            }

            // If searching for the MPQ header is disabled, return an error
            if(dwFlags & MPQ_OPEN_NO_HEADER_SEARCH)
            {
                nError = ERROR_NOT_SUPPORTED;
                break;
            }

            // Move to the next possible offset
            SearchOffset += 0x200;
        }

        // Did we identify one of the supported headers?
        if(nError == ERROR_SUCCESS)
        {
            // Set the user data position to the MPQ header, if none
            if(ha->pUserData == NULL)
                ha->UserDataPos = SearchOffset;

            // Set the position of the MPQ header
            ha->pHeader = (TMPQHeader *)ha->HeaderData;
            ha->MpqPos = SearchOffset;

            // Sector size must be nonzero.
            if(SearchOffset >= FileSize || ha->pHeader->wSectorSize == 0)
                nError = ERROR_BAD_FORMAT;
        }
    }

    // Fix table positions according to format
    if(nError == ERROR_SUCCESS)
    {
        // Dump the header
//      DumpMpqHeader(ha->pHeader);

        // W3x map protectors use the fact that War3's Storm.dll ignores the MPQ user data
        // and ignores the MPQ format version as well. The trick is to
        // fake MPQ format 2 with an improper hi-word position of the hash table and block table.
        // We can overcome such protectors by forcing the archive to be opened as MPQ v 1.0.
        if(dwFlags & MPQ_OPEN_FORCE_MPQ_V1)
        {
            ha->pHeader->wFormatVersion = MPQ_FORMAT_VERSION_1;
            ha->pHeader->dwHeaderSize = MPQ_HEADER_SIZE_V1;
            ha->dwFlags |= MPQ_FLAG_READ_ONLY;
            ha->pUserData = NULL;
        }

        // Either MPQ_OPEN_NO_LISTFILE or MPQ_OPEN_NO_ATTRIBUTES triggers read-only mode
        if(dwFlags & (MPQ_OPEN_NO_LISTFILE | MPQ_OPEN_NO_ATTRIBUTES))
            ha->dwFlags |= MPQ_FLAG_READ_ONLY;

        // Set the size of file sector
        ha->dwSectorSize = (0x200 << ha->pHeader->wSectorSize);

        // Verify that none of the tables starts beyond the end of the file
        nError = VerifyMpqTablePositions(ha, FileSize);
    }

    // Read the hash table. Ignore the result, as hash table is no longer required
    // Read HET table. Ignore the result, as HET table is no longer required
    if(nError == ERROR_SUCCESS)
    {
        nError = LoadAnyHashTable(ha);
    }

    // Now, build the file table. It will be built by combining
    // the block table, BET table, hi-block table, (attributes) and (listfile).
    if(nError == ERROR_SUCCESS)
    {
        nError = BuildFileTable(ha);
    }

    // Verify the file table, if no malformation was detected
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_MALFORMED) == 0)
    {
        TFileEntry * pFileTableEnd = ha->pFileTable + ha->dwFileTableSize;
        ULONGLONG RawFilePos;

        // Parse all file entries
        for(pFileEntry = ha->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
        {
            // If that file entry is valid, check the file position
            if(pFileEntry->dwFlags & MPQ_FILE_EXISTS)
            {
                // Get the 64-bit file position,
                // relative to the beginning of the file
                RawFilePos = ha->MpqPos + pFileEntry->ByteOffset;

                // The beginning of the file must be within range
                if(RawFilePos > FileSize)
                {
                    nError = ERROR_FILE_CORRUPT;
                    break;
                }

                // The end of the file must be within range
                RawFilePos += pFileEntry->dwCmpSize;
                if(RawFilePos > FileSize)
                {
                    nError = ERROR_FILE_CORRUPT;
                    break;
                }
            }
        }
    }

    // Load the internal listfile and include it to the file table
    if(nError == ERROR_SUCCESS && (dwFlags & MPQ_OPEN_NO_LISTFILE) == 0)
    {
        // Save the flags for (listfile)
        pFileEntry = GetFileEntryLocale(ha, LISTFILE_NAME, LANG_NEUTRAL);
        if(pFileEntry != NULL)
        {
            // Ignore result of the operation. (listfile) is optional.
            SFileAddListFile((HANDLE)ha, NULL);
            ha->dwFileFlags1 = pFileEntry->dwFlags;
        }
    }

    // Load the "(attributes)" file and merge it to the file table
    if(nError == ERROR_SUCCESS && (dwFlags & MPQ_OPEN_NO_ATTRIBUTES) == 0)
    {
        // Save the flags for (attributes)
        pFileEntry = GetFileEntryLocale(ha, ATTRIBUTES_NAME, LANG_NEUTRAL);
        if(pFileEntry != NULL)
        {
            // Ignore result of the operation. (attributes) is optional.
            SAttrLoadAttributes(ha);
            ha->dwFileFlags2 = pFileEntry->dwFlags;
        }
    }

    // Cleanup and exit
    if(nError != ERROR_SUCCESS)
    {
        FileStream_Close(pStream);
        FreeMPQArchive(ha);
        SetLastError(nError);
        ha = NULL;
    }

    *phMpq = ha;
    return (nError == ERROR_SUCCESS);
}
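// Usage sketch (illustration only, not part of the original source): open a
// possibly protected W3x map read-only, forcing MPQ v1 parsing as handled in
// the archive-opening code above. The file name is hypothetical.
static bool OpenProtectedMapExample()
{
    HANDLE hMpq = NULL;

    if(!SFileOpenArchive(_T("ProtectedMap.w3x"), 0, MPQ_OPEN_FORCE_MPQ_V1 | MPQ_OPEN_READ_ONLY, &hMpq))
        return false;

    // ... read files from the archive here ...

    SFileCloseArchive(hMpq);
    return true;
}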
static bool CalculateMpqHashSha1(
    TMPQArchive * ha,
    PMPQ_SIGNATURE_INFO pSI,
    unsigned char * sha1_tail0,
    unsigned char * sha1_tail1,
    unsigned char * sha1_tail2)
{
    ULONGLONG BeginBuffer;
    hash_state sha1_state_temp;
    hash_state sha1_state;
    LPBYTE pbDigestBuffer = NULL;
    char szPlainName[MAX_PATH];

    // Allocate buffer for creating the MPQ digest.
    pbDigestBuffer = STORM_ALLOC(BYTE, MPQ_DIGEST_UNIT_SIZE);
    if(pbDigestBuffer == NULL)
        return false;

    // Initialize SHA1 state structure
    sha1_init(&sha1_state);

    // Calculate begin of data to be hashed
    BeginBuffer = pSI->BeginMpqData;

    // Create the digest
    for(;;)
    {
        ULONGLONG BytesRemaining;
        DWORD dwToRead = MPQ_DIGEST_UNIT_SIZE;

        // Check the number of bytes remaining
        BytesRemaining = pSI->EndMpqData - BeginBuffer;
        if(BytesRemaining < MPQ_DIGEST_UNIT_SIZE)
            dwToRead = (DWORD)BytesRemaining;
        if(dwToRead == 0)
            break;

        // Read the next chunk 
        if(!FileStream_Read(ha->pStream, &BeginBuffer, pbDigestBuffer, dwToRead))
        {
            STORM_FREE(pbDigestBuffer);
            return false;
        }

        // Pass the buffer to the hashing function
        sha1_process(&sha1_state, pbDigestBuffer, dwToRead);

        // Move pointers
        BeginBuffer += dwToRead;
    }

    // Add all three known tails and generate three hashes
    memcpy(&sha1_state_temp, &sha1_state, sizeof(hash_state));
    sha1_done(&sha1_state_temp, sha1_tail0);

    memcpy(&sha1_state_temp, &sha1_state, sizeof(hash_state));
    GetPlainAnsiFileName(FileStream_GetFileName(ha->pStream), szPlainName);
    AddTailToSha1(&sha1_state_temp, szPlainName);
    sha1_done(&sha1_state_temp, sha1_tail1);

    memcpy(&sha1_state_temp, &sha1_state, sizeof(hash_state));
    AddTailToSha1(&sha1_state_temp, "ARCHIVE");
    sha1_done(&sha1_state_temp, sha1_tail2);

    // Free the digest buffer and return
    STORM_FREE(pbDigestBuffer);
    return true;
}
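// Illustration only (not part of the original source): a strong-signature
// verifier typically compares the SHA1 digest recovered from the signature
// against each of the three tail variants produced above. The helper name is
// hypothetical and SHA1_DIGEST_SIZE (0x14 bytes) is assumed to be defined.
static bool MatchesAnySha1Tail(
    const unsigned char * recovered_sha1,
    const unsigned char * sha1_tail0,
    const unsigned char * sha1_tail1,
    const unsigned char * sha1_tail2)
{
    return (memcmp(recovered_sha1, sha1_tail0, SHA1_DIGEST_SIZE) == 0) ||
           (memcmp(recovered_sha1, sha1_tail1, SHA1_DIGEST_SIZE) == 0) ||
           (memcmp(recovered_sha1, sha1_tail2, SHA1_DIGEST_SIZE) == 0);
}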
bool WINAPI SFileCreateArchive2(const TCHAR * szMpqName, PSFILE_CREATE_MPQ pCreateInfo, HANDLE * phMpq)
{
    TFileStream * pStream = NULL;           // File stream
    TMPQArchive * ha = NULL;                // MPQ archive handle
    TMPQHeader * pHeader;
    ULONGLONG MpqPos = 0;                   // Position of MPQ header in the file
    HANDLE hMpq = NULL;
    DWORD dwBlockTableSize = 0;             // Initial block table size
    DWORD dwHashTableSize = 0;
    DWORD dwReservedFiles = 0;              // Number of reserved file entries
    DWORD dwMpqFlags = 0;
    int nError = ERROR_SUCCESS;

    // Check the parameters, if they are valid
    if(szMpqName == NULL || *szMpqName == 0 || pCreateInfo == NULL || phMpq == NULL)
    {
        SetLastError(ERROR_INVALID_PARAMETER);
        return false;
    }

    // Verify if all variables in SFILE_CREATE_MPQ are correct
    if((pCreateInfo->cbSize == 0 || pCreateInfo->cbSize > sizeof(SFILE_CREATE_MPQ))    ||
       (pCreateInfo->dwMpqVersion > MPQ_FORMAT_VERSION_4)                              ||
       (pCreateInfo->pvUserData != NULL || pCreateInfo->cbUserData != 0)               ||
       (pCreateInfo->dwAttrFlags & ~MPQ_ATTRIBUTE_ALL)                                 ||
       (pCreateInfo->dwSectorSize & (pCreateInfo->dwSectorSize - 1))                   ||
       (pCreateInfo->dwRawChunkSize & (pCreateInfo->dwRawChunkSize - 1)))
    {
        SetLastError(ERROR_INVALID_PARAMETER);
        return false;
    }

    // One time initialization of MPQ cryptography
    InitializeMpqCryptography();

    // Check whether the file already exists and is an MPQ archive.
    // If so, we refuse to overwrite it.
    if(SFileOpenArchive(szMpqName, 0, STREAM_PROVIDER_FLAT | BASE_PROVIDER_FILE | MPQ_OPEN_NO_ATTRIBUTES | MPQ_OPEN_NO_LISTFILE, &hMpq))
    {
        SFileCloseArchive(hMpq);
        SetLastError(ERROR_ALREADY_EXISTS);
        return false;
    }

    //
    // At this point, we have to create the archive.
    // - If the file exists, convert it to an MPQ archive.
    // - If the file doesn't exist, create a new empty file
    //

    pStream = FileStream_OpenFile(szMpqName, pCreateInfo->dwStreamFlags);
    if(pStream == NULL)
    {
        pStream = FileStream_CreateFile(szMpqName, pCreateInfo->dwStreamFlags);
        if(pStream == NULL)
            return false;
    }

    // Increment the maximum number of files to leave space for (listfile)
    if(pCreateInfo->dwMaxFileCount && pCreateInfo->dwFileFlags1)
    {
        dwMpqFlags |= MPQ_FLAG_LISTFILE_NEW;
        dwReservedFiles++;
    }

    // Increment the maximum number of files to leave space for (attributes)
    if(pCreateInfo->dwMaxFileCount && pCreateInfo->dwFileFlags2 && pCreateInfo->dwAttrFlags)
    {
        dwMpqFlags |= MPQ_FLAG_ATTRIBUTES_NEW;
        dwReservedFiles++;
    }

    // Increment the maximum number of files to leave space for (signature)
    if(pCreateInfo->dwMaxFileCount && pCreateInfo->dwFileFlags3)
    {
        dwMpqFlags |= MPQ_FLAG_SIGNATURE_NEW;
        dwReservedFiles++;
    }

    // Compute the hash table size from the requested file count
    dwHashTableSize = GetNearestPowerOfTwo(pCreateInfo->dwMaxFileCount + dwReservedFiles);

    // Retrieve the file size and round it up to 0x200 bytes
    FileStream_GetSize(pStream, &MpqPos);
    MpqPos = (MpqPos + 0x1FF) & (ULONGLONG)0xFFFFFFFFFFFFFE00ULL;
    if(!FileStream_SetSize(pStream, MpqPos))
        nError = GetLastError();

#ifdef _DEBUG    
    // Debug code, used for testing StormLib
//  dwBlockTableSize = dwHashTableSize * 2;
#endif

    // Create the archive handle
    if(nError == ERROR_SUCCESS)
    {
        if((ha = STORM_ALLOC(TMPQArchive, 1)) == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Fill the MPQ archive handle structure
    if(nError == ERROR_SUCCESS)
    {
        memset(ha, 0, sizeof(TMPQArchive));
        ha->pfnHashString   = HashStringSlash;
        ha->pStream         = pStream;
        ha->dwSectorSize    = pCreateInfo->dwSectorSize;
        ha->UserDataPos     = MpqPos;
        ha->MpqPos          = MpqPos;
        ha->pHeader         = pHeader = (TMPQHeader *)ha->HeaderData;
        ha->dwMaxFileCount  = dwHashTableSize;
        ha->dwFileTableSize = 0;
        ha->dwReservedFiles = dwReservedFiles;
        ha->dwFileFlags1    = pCreateInfo->dwFileFlags1;
        ha->dwFileFlags2    = pCreateInfo->dwFileFlags2;
        ha->dwFileFlags3    = pCreateInfo->dwFileFlags3 ? MPQ_FILE_EXISTS : 0;
        ha->dwAttrFlags     = pCreateInfo->dwAttrFlags;
        ha->dwFlags         = dwMpqFlags | MPQ_FLAG_CHANGED;
        pStream = NULL;

        // Fill the MPQ header
        memset(pHeader, 0, sizeof(ha->HeaderData));
        pHeader->dwID             = ID_MPQ;
        pHeader->dwHeaderSize     = MpqHeaderSizes[pCreateInfo->dwMpqVersion];
        pHeader->dwArchiveSize    = pHeader->dwHeaderSize + dwHashTableSize * sizeof(TMPQHash);
        pHeader->wFormatVersion   = (USHORT)pCreateInfo->dwMpqVersion;
        pHeader->wSectorSize      = GetSectorSizeShift(ha->dwSectorSize);
        pHeader->dwHashTablePos   = pHeader->dwHeaderSize;
        pHeader->dwHashTableSize  = dwHashTableSize;
        pHeader->dwBlockTablePos  = pHeader->dwHashTablePos + dwHashTableSize * sizeof(TMPQHash);
        pHeader->dwBlockTableSize = dwBlockTableSize;

        // For MPQs of format 4 or newer, set the size of the raw data block
        // used for MD5 calculation
        if(pCreateInfo->dwMpqVersion >= MPQ_FORMAT_VERSION_4)
            pHeader->dwRawChunkSize = pCreateInfo->dwRawChunkSize;

        // Write the naked MPQ header
        nError = WriteNakedMPQHeader(ha);
    }

    // Create initial HET table, if the caller required an MPQ format 3.0 or newer
    if(nError == ERROR_SUCCESS && pCreateInfo->dwMpqVersion >= MPQ_FORMAT_VERSION_3 && pCreateInfo->dwMaxFileCount != 0)
    {
        ha->pHetTable = CreateHetTable(ha->dwFileTableSize, 0, 0x40, NULL);
        if(ha->pHetTable == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Create initial hash table
    if(nError == ERROR_SUCCESS && dwHashTableSize != 0)
    {
        nError = CreateHashTable(ha, dwHashTableSize);
    }

    // Create initial file table
    if(nError == ERROR_SUCCESS && ha->dwMaxFileCount != 0)
    {
        nError = CreateFileTable(ha, ha->dwMaxFileCount);
    }

    // Cleanup: if an error occurred, free all buffers and return
    if(nError != ERROR_SUCCESS)
    {
        FileStream_Close(pStream);
        FreeArchiveHandle(ha);
        SetLastError(nError);
        ha = NULL;
    }
    
    // Return the values
    *phMpq = (HANDLE)ha;
    return (nError == ERROR_SUCCESS);
}
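// Usage sketch (illustration only): create a new format-2 archive with room
// for 1000 files plus an internal (listfile). All member values are examples;
// only the members read and validated by SFileCreateArchive2 above are filled in.
static bool CreateExampleArchive(const TCHAR * szMpqName, HANDLE * phMpq)
{
    SFILE_CREATE_MPQ CreateInfo;

    memset(&CreateInfo, 0, sizeof(SFILE_CREATE_MPQ));
    CreateInfo.cbSize         = sizeof(SFILE_CREATE_MPQ);
    CreateInfo.dwMpqVersion   = MPQ_FORMAT_VERSION_2;              // Target MPQ format
    CreateInfo.dwStreamFlags  = STREAM_PROVIDER_FLAT | BASE_PROVIDER_FILE;
    CreateInfo.dwFileFlags1   = MPQ_FILE_EXISTS;                   // Reserve space for (listfile)
    CreateInfo.dwFileFlags2   = 0;                                 // No (attributes)
    CreateInfo.dwFileFlags3   = 0;                                 // No (signature)
    CreateInfo.dwAttrFlags    = 0;
    CreateInfo.dwSectorSize   = 0x1000;                            // Must be a power of two
    CreateInfo.dwRawChunkSize = 0;                                 // Only used for format 4+
    CreateInfo.dwMaxFileCount = 1000;

    return SFileCreateArchive2(szMpqName, &CreateInfo, phMpq);
}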
bool WINAPI SFileAddFileEx(
    HANDLE hMpq,
    const TCHAR * szFileName,
    const char * szArchivedName,
    DWORD dwFlags,
    DWORD dwCompression,            // Compression of the first sector
    DWORD dwCompressionNext)        // Compression of next sectors
{
    ULONGLONG FileSize = 0;
    ULONGLONG FileTime = 0;
    TFileStream * pStream = NULL;
    HANDLE hMpqFile = NULL;
    LPBYTE pbFileData = NULL;
    DWORD dwBytesRemaining = 0;
    DWORD dwBytesToRead;
    DWORD dwSectorSize = 0x1000;
    DWORD dwChannels = 0;
    bool bIsAdpcmCompression = false;
    bool bIsFirstSector = true;
    int nError = ERROR_SUCCESS;

    // Check parameters
    if(hMpq == NULL || szFileName == NULL || *szFileName == 0)
    {
        SetLastError(ERROR_INVALID_PARAMETER);
        return false;
    }

    // Open added file
    pStream = FileStream_OpenFile(szFileName, STREAM_FLAG_READ_ONLY | STREAM_PROVIDER_FLAT | BASE_PROVIDER_FILE);
    if(pStream == NULL)
        return false;

    // Files bigger than 4GB cannot be added to MPQ
    FileStream_GetTime(pStream, &FileTime);
    FileStream_GetSize(pStream, &FileSize);
    if(FileSize >> 32)
        nError = ERROR_DISK_FULL;

    // Allocate data buffer for reading from the source file
    if(nError == ERROR_SUCCESS)
    {
        dwBytesRemaining = (DWORD)FileSize;
        pbFileData = STORM_ALLOC(BYTE, dwSectorSize);
        if(pbFileData == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Deal with various combinations of compression
    if(nError == ERROR_SUCCESS)
    {
        // When the compression for the next sectors is set to default,
        // reuse the compression of the first sector
        if(dwCompressionNext == MPQ_COMPRESSION_NEXT_SAME)
            dwCompressionNext = dwCompression;
        
        // If the caller wants ADPCM compression, we make sure
        // that the first sector is not compressed with lossy compression
        if(dwCompressionNext & (MPQ_COMPRESSION_ADPCM_MONO | MPQ_COMPRESSION_ADPCM_STEREO))
        {
            // The compression of the first file sector must not be ADPCM
            // in order not to corrupt the headers
            if(dwCompression & (MPQ_COMPRESSION_ADPCM_MONO | MPQ_COMPRESSION_ADPCM_STEREO))
                dwCompression = MPQ_COMPRESSION_PKWARE;
            
            // Remove both the mono and stereo flags.
            // They will be re-added according to the WAVE type
            dwCompressionNext &= ~(MPQ_COMPRESSION_ADPCM_MONO | MPQ_COMPRESSION_ADPCM_STEREO);
            bIsAdpcmCompression = true;
        }

        // Initiate adding file to the MPQ
        if(!SFileCreateFile(hMpq, szArchivedName, FileTime, (DWORD)FileSize, lcFileLocale, dwFlags, &hMpqFile))
            nError = GetLastError();
    }

    // Write the file data to the MPQ
    while(nError == ERROR_SUCCESS && dwBytesRemaining != 0)
    {
        // Get the number of bytes remaining in the source file
        dwBytesToRead = dwBytesRemaining;
        if(dwBytesToRead > dwSectorSize)
            dwBytesToRead = dwSectorSize;

        // Read data from the local file
        if(!FileStream_Read(pStream, NULL, pbFileData, dwBytesToRead))
        {
            nError = GetLastError();
            break;
        }

        // If the file being added is a WAVE file, we check the number of channels
        if(bIsFirstSector && bIsAdpcmCompression)
        {
            // The file must really be a WAVE file with at least 16 bits per sample,
            // otherwise the ADPCM compression will corrupt it
            if(IsWaveFile_16BitsPerAdpcmSample(pbFileData, dwBytesToRead, &dwChannels))
            {
                // Setup the compression of next sectors according to number of channels
                dwCompressionNext |= (dwChannels == 1) ? MPQ_COMPRESSION_ADPCM_MONO : MPQ_COMPRESSION_ADPCM_STEREO;
            }
            else
            {
                // Setup the compression of next sectors to a lossless compression
                dwCompressionNext = (dwCompression & MPQ_LOSSY_COMPRESSION_MASK) ? MPQ_COMPRESSION_PKWARE : dwCompression;
            }

            bIsFirstSector = false;
        }

        // Add the file sectors to the MPQ
        if(!SFileWriteFile(hMpqFile, pbFileData, dwBytesToRead, dwCompression))
        {
            nError = GetLastError();
            break;
        }

        // Set the next data compression
        dwBytesRemaining -= dwBytesToRead;
        dwCompression = dwCompressionNext;
    }

    // Finish the file writing
    if(hMpqFile != NULL)
    {
        if(!SFileFinishFile(hMpqFile))
            nError = GetLastError();
    }

    // Cleanup and exit
    if(pbFileData != NULL)
        STORM_FREE(pbFileData);
    if(pStream != NULL)
        FileStream_Close(pStream);
    if(nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
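// Usage sketch (illustration only): add a local file to an already open
// archive, compressing every sector with PKWARE compression and storing the
// file encrypted. Both file names are hypothetical.
static bool AddExampleFile(HANDLE hMpq)
{
    return SFileAddFileEx(
        hMpq,
        _T("C:\\Data\\war3map.j"),                  // Local file to add
        "war3map.j",                                // Name inside the archive
        MPQ_FILE_COMPRESS | MPQ_FILE_ENCRYPTED | MPQ_FILE_REPLACEEXISTING,
        MPQ_COMPRESSION_PKWARE,                     // Compression of the first sector
        MPQ_COMPRESSION_NEXT_SAME);                 // Next sectors use the same compression
}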
static int ReadMpqFileSingleUnit(TMPQFile * hf, void * pvBuffer, DWORD dwFilePos, DWORD dwToRead, LPDWORD pdwBytesRead)
{
    ULONGLONG RawFilePos = hf->RawFilePos;
    TMPQArchive * ha = hf->ha;
    TFileEntry * pFileEntry = hf->pFileEntry;
    LPBYTE pbCompressed = NULL;
    LPBYTE pbRawData = NULL;
    int nError = ERROR_SUCCESS;

    // If the file buffer is not allocated yet, do it.
    if(hf->pbFileSector == NULL)
    {
        nError = AllocateSectorBuffer(hf);
        if(nError != ERROR_SUCCESS)
            return nError;
        pbRawData = hf->pbFileSector;
    }

    // If the file is a patch file, adjust raw data offset
    if(hf->pPatchInfo != NULL)
        RawFilePos += hf->pPatchInfo->dwLength;

    // If the file sector is not loaded yet, do it
    if(hf->dwSectorOffs != 0)
    {
        // Is the file compressed?
        if(pFileEntry->dwFlags & MPQ_FILE_COMPRESSED)
        {
            // Allocate space for compressed data
            pbCompressed = STORM_ALLOC(BYTE, pFileEntry->dwCmpSize);
            if(pbCompressed == NULL)
                return ERROR_NOT_ENOUGH_MEMORY;
            pbRawData = pbCompressed;
        }

        // Load the raw (compressed, encrypted) data
        if(!FileStream_Read(ha->pStream, &RawFilePos, pbRawData, pFileEntry->dwCmpSize))
        {
            STORM_FREE(pbCompressed);
            return GetLastError();
        }

        // If the file is encrypted, we have to decrypt the data first
        if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
        {
            BSWAP_ARRAY32_UNSIGNED(pbRawData, pFileEntry->dwCmpSize);
            DecryptMpqBlock(pbRawData, pFileEntry->dwCmpSize, hf->dwFileKey);
            BSWAP_ARRAY32_UNSIGNED(pbRawData, pFileEntry->dwCmpSize);
        }

        // If the file is compressed, we have to decompress it now
        if(pFileEntry->dwFlags & MPQ_FILE_COMPRESSED)
        {
            int cbOutBuffer = (int)hf->dwDataSize;
            int cbInBuffer = (int)pFileEntry->dwCmpSize;
            int nResult = 0;

            //
            // If the file is an incremental patch, the size of compressed data
            // is determined as pFileEntry->dwCmpSize - sizeof(TPatchInfo)
            //
            // In "wow-update-12694.MPQ" from Wow-Cataclysm BETA:
            //
            // File                                    CmprSize   DcmpSize DataSize Compressed?
            // --------------------------------------  ---------- -------- -------- ---------------
            // esES\DBFilesClient\LightSkyBox.dbc      0xBE->0xA2  0xBC     0xBC     Yes
            // deDE\DBFilesClient\MountCapability.dbc  0x93->0x77  0x77     0x77     No
            //

            if(pFileEntry->dwFlags & MPQ_FILE_PATCH_FILE)
                cbInBuffer = cbInBuffer - sizeof(TPatchInfo);

            // Is the file compressed by Blizzard's multiple compression ?
            if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS)
            {
                if(ha->pHeader->wFormatVersion >= MPQ_FORMAT_VERSION_2)
                    nResult = SCompDecompress2((char *)hf->pbFileSector, &cbOutBuffer, (char *)pbRawData, cbInBuffer);
                else
                    nResult = SCompDecompress((char *)hf->pbFileSector, &cbOutBuffer, (char *)pbRawData, cbInBuffer);
            }

            // Is the file compressed by PKWARE Data Compression Library ?
            // Note: Single unit files compressed with IMPLODE are not supported by Blizzard
            else if(pFileEntry->dwFlags & MPQ_FILE_IMPLODE)
                nResult = SCompExplode((char *)hf->pbFileSector, &cbOutBuffer, (char *)pbRawData, cbInBuffer);

            nError = (nResult != 0) ? ERROR_SUCCESS : ERROR_FILE_CORRUPT;
        }
        else
        {
            if(pbRawData != hf->pbFileSector)
                memcpy(hf->pbFileSector, pbRawData, hf->dwDataSize);
        }

        // Free the decompression buffer.
        if(pbCompressed != NULL)
            STORM_FREE(pbCompressed);

        // The file sector is now properly loaded
        hf->dwSectorOffs = 0;
    }

    // At this moment, we have the file loaded into the file buffer.
    // Copy as much as the caller wants
    if(nError == ERROR_SUCCESS && hf->dwSectorOffs == 0)
    {
        // File position is greater or equal to file size ?
        if(dwFilePos >= hf->dwDataSize)
        {
            *pdwBytesRead = 0;
            return ERROR_SUCCESS;
        }

        // If there are not enough bytes remaining in the file, clamp the read size
        if((hf->dwDataSize - dwFilePos) < dwToRead)
            dwToRead = (hf->dwDataSize - dwFilePos);

        // Copy the bytes
        memcpy(pvBuffer, hf->pbFileSector + dwFilePos, dwToRead);

        // Give the number of bytes read
        *pdwBytesRead = dwToRead;
        return ERROR_SUCCESS;
    }

    // An error, sorry
    return ERROR_CAN_NOT_COMPLETE;
}
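// Usage sketch (illustration only): the public read path that eventually
// dispatches to ReadMpqFileSingleUnit above (single-unit files) or to
// ReadMpqSectors below (sector-split files). SFileOpenFileEx, SFileGetFileSize,
// SFileReadFile and SFileCloseFile are the public StormLib entry points;
// SFILE_INVALID_SIZE (0xFFFFFFFF) is assumed to be available.
static bool ReadWholeFileExample(HANDLE hMpq, const char * szFileName)
{
    HANDLE hFile = NULL;
    LPBYTE pbBuffer = NULL;
    DWORD dwFileSize = 0;
    DWORD dwBytesRead = 0;
    bool bResult = false;

    if(SFileOpenFileEx(hMpq, szFileName, 0, &hFile))
    {
        dwFileSize = SFileGetFileSize(hFile, NULL);
        if(dwFileSize != 0 && dwFileSize != SFILE_INVALID_SIZE)
        {
            pbBuffer = STORM_ALLOC(BYTE, dwFileSize);
            if(pbBuffer != NULL)
            {
                // A single read of the whole file
                bResult = SFileReadFile(hFile, pbBuffer, dwFileSize, &dwBytesRead, NULL);
                STORM_FREE(pbBuffer);
            }
        }
        SFileCloseFile(hFile);
    }
    return bResult;
}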
//  hf            - MPQ File handle.
//  pbBuffer      - Pointer to target buffer to store sectors.
//  dwByteOffset  - Position of sector in the file (relative to file begin)
//  dwBytesToRead - Number of bytes to read. Must be a multiple of the sector size.
//  pdwBytesRead  - Receives the number of bytes actually loaded
static int ReadMpqSectors(TMPQFile * hf, LPBYTE pbBuffer, DWORD dwByteOffset, DWORD dwBytesToRead, LPDWORD pdwBytesRead)
{
    ULONGLONG RawFilePos;
    TMPQArchive * ha = hf->ha;
    TFileEntry * pFileEntry = hf->pFileEntry;
    LPBYTE pbRawSector = NULL;
    LPBYTE pbOutSector = pbBuffer;
    LPBYTE pbInSector = pbBuffer;
    DWORD dwRawBytesToRead;
    DWORD dwRawSectorOffset = dwByteOffset;
    DWORD dwSectorsToRead = dwBytesToRead / ha->dwSectorSize;
    DWORD dwSectorIndex = dwByteOffset / ha->dwSectorSize;
    DWORD dwSectorsDone = 0;
    DWORD dwBytesRead = 0;
    int nError = ERROR_SUCCESS;

    // Note that dwByteOffset must be aligned to the sector size
    // Note that dwBytesToRead must be a multiple of the sector size
    // This is a local function, so we don't check that here.
    // Note that files stored as single units are processed by a separate function

    // If there are not enough bytes remaining, clamp dwBytesToRead
    if((dwByteOffset + dwBytesToRead) > hf->dwDataSize)
        dwBytesToRead = hf->dwDataSize - dwByteOffset;
    dwRawBytesToRead = dwBytesToRead;

    // Perform all necessary work to do with compressed files
    if(pFileEntry->dwFlags & MPQ_FILE_COMPRESSED)
    {
        // If the sector positions are not loaded yet, do it
        if(hf->SectorOffsets == NULL)
        {
            nError = AllocateSectorOffsets(hf, true);
            if(nError != ERROR_SUCCESS)
                return nError;
        }

        // If the sector checksums are not loaded yet, load them now.
        if(hf->SectorChksums == NULL && (pFileEntry->dwFlags & MPQ_FILE_SECTOR_CRC) && hf->bLoadedSectorCRCs == false)
        {
            //
            // Sector CRCs are a rarely useful feature: they are almost never present,
            // the table is often empty, or the end offset of the sector CRCs is zero.
            // We only try to load the sector CRCs once; whether or not that fails,
            // we won't try again for the given file.
            //

            AllocateSectorChecksums(hf, true);
            hf->bLoadedSectorCRCs = true;
        }

        // TODO: If the raw data MD5s are not loaded yet, load them now
        // Only do it if the MPQ is of format 4.0
        //      if(ha->pHeader->wFormatVersion >= MPQ_FORMAT_VERSION_4 && ha->pHeader->dwRawChunkSize != 0)
        //      {
        //          nError = AllocateRawMD5s(hf, true);
        //          if(nError != ERROR_SUCCESS)
        //              return nError;
        //      }

        // If the file is compressed, also allocate secondary buffer
        pbInSector = pbRawSector = STORM_ALLOC(BYTE, dwBytesToRead);
        if(pbRawSector == NULL)
            return ERROR_NOT_ENOUGH_MEMORY;

        // Compute the raw offset and length of the sector(s) to read
        dwRawSectorOffset = hf->SectorOffsets[dwSectorIndex];
        dwRawBytesToRead = hf->SectorOffsets[dwSectorIndex + dwSectorsToRead] - dwRawSectorOffset;
    }

    // Calculate raw file offset where the sector(s) are stored.
    CalculateRawSectorOffset(RawFilePos, hf, dwRawSectorOffset);

    // Set file pointer and read all required sectors
    if(!FileStream_Read(ha->pStream, &RawFilePos, pbInSector, dwRawBytesToRead))
        return GetLastError();
    dwBytesRead = 0;

    // Now we have to decrypt and decompress all file sectors that have been loaded
    for(DWORD i = 0; i < dwSectorsToRead; i++)
    {
        DWORD dwRawBytesInThisSector = ha->dwSectorSize;
        DWORD dwBytesInThisSector = ha->dwSectorSize;
        DWORD dwIndex = dwSectorIndex + i;

        // If there are not enough bytes in the last sector,
        // cut the number of bytes in this sector
        if(dwRawBytesInThisSector > dwBytesToRead)
            dwRawBytesInThisSector = dwBytesToRead;
        if(dwBytesInThisSector > dwBytesToRead)
            dwBytesInThisSector = dwBytesToRead;

        // If the file is compressed, we have to adjust the raw sector size
        if(pFileEntry->dwFlags & MPQ_FILE_COMPRESSED)
            dwRawBytesInThisSector = hf->SectorOffsets[dwIndex + 1] - hf->SectorOffsets[dwIndex];

        // If the file is encrypted, we have to decrypt the sector
        if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
        {
            BSWAP_ARRAY32_UNSIGNED(pbInSector, dwRawBytesInThisSector);

            // If we don't know the key, try to detect it by file content
            if(hf->dwFileKey == 0)
            {
                hf->dwFileKey = DetectFileKeyByContent(pbInSector, dwBytesInThisSector);
                if(hf->dwFileKey == 0)
                {
                    nError = ERROR_UNKNOWN_FILE_KEY;
                    break;
                }
            }

            DecryptMpqBlock(pbInSector, dwRawBytesInThisSector, hf->dwFileKey + dwIndex);
            BSWAP_ARRAY32_UNSIGNED(pbInSector, dwRawBytesInThisSector);
        }

        // If the file has sector CRC check turned on, perform it
        if(hf->bCheckSectorCRCs && hf->SectorChksums != NULL)
        {
            DWORD dwAdlerExpected = hf->SectorChksums[dwIndex];
            DWORD dwAdlerValue = 0;

            // We can only check the sector CRC when it is neither zero
            // nor 0xFFFFFFFF.
            if(dwAdlerExpected != 0 && dwAdlerExpected != 0xFFFFFFFF)
            {
                dwAdlerValue = adler32(0, pbInSector, dwRawBytesInThisSector);
                if(dwAdlerValue != dwAdlerExpected)
                {
                    nError = ERROR_CHECKSUM_ERROR;
                    break;
                }
            }
        }

        // If the sector is really compressed, decompress it.
        // WARNING: some sectors may not be compressed; this can only be determined
        // by comparing the uncompressed and compressed sizes
        if(dwRawBytesInThisSector < dwBytesInThisSector)
        {
            int cbOutSector = dwBytesInThisSector;
            int cbInSector = dwRawBytesInThisSector;
            int nResult = 0;

            // Is the file compressed by Blizzard's multiple compression ?
            if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS)
            {
                if(ha->pHeader->wFormatVersion >= MPQ_FORMAT_VERSION_2)
                    nResult = SCompDecompress2((char *)pbOutSector, &cbOutSector, (char *)pbInSector, cbInSector);
                else
                    nResult = SCompDecompress((char *)pbOutSector, &cbOutSector, (char *)pbInSector, cbInSector);
            }

            // Is the file compressed by PKWARE Data Compression Library ?
            else if(pFileEntry->dwFlags & MPQ_FILE_IMPLODE)
            {
                nResult = SCompExplode((char *)pbOutSector, &cbOutSector, (char *)pbInSector, cbInSector);
            }

            // Did the decompression fail ?
            if(nResult == 0)
            {
                nError = ERROR_FILE_CORRUPT;
                break;
            }
        }
        else
        {
            if(pbOutSector != pbInSector)
                memcpy(pbOutSector, pbInSector, dwBytesInThisSector);
        }

        // Move pointers
        dwBytesToRead -= dwBytesInThisSector;
        dwByteOffset += dwBytesInThisSector;
        dwBytesRead += dwBytesInThisSector;
        pbOutSector += dwBytesInThisSector;
        pbInSector += dwRawBytesInThisSector;
        dwSectorsDone++;
    }

    // Free all used buffers
    if(pbRawSector != NULL)
        STORM_FREE(pbRawSector);

    // Give the caller the number of bytes read
    *pdwBytesRead = dwBytesRead;
    return nError;
}
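// Illustration only (hypothetical values): for a compressed file split into
// sectors, hf->SectorOffsets holds the raw start of every sector plus one
// terminating entry, so the raw size of sector i is
// SectorOffsets[i + 1] - SectorOffsets[i], exactly as used in ReadMpqSectors above.
static DWORD SectorOffsetsExample(void)
{
    DWORD SectorOffsets[4] = { 0x10, 0x5A0, 0xC30, 0x11F4 };    // 3 sectors + terminating entry
    DWORD dwTotalRawSize = 0;

    for(DWORD i = 0; i < 3; i++)
        dwTotalRawSize += SectorOffsets[i + 1] - SectorOffsets[i];  // Raw size of sector i

    return dwTotalRawSize;                                      // 0x11E4 for the values above
}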
int EXPORT_SYMBOL SFileCompactArchive(void * hMpq, const char * szListFile, int bReserved)
{
    TFileStream * pTempStream = NULL;
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    uint64_t ByteOffset;
    uint64_t ByteCount;
    uint32_t * pFileKeys = NULL;
    char szTempFile[1024] = "";
    char * szTemp = NULL;
    int nError = ERROR_SUCCESS;

    /* Test the valid parameters */
    if(!IsValidMpqHandle(hMpq))
        nError = ERROR_INVALID_HANDLE;
    if(ha->dwFlags & MPQ_FLAG_READ_ONLY)
        nError = ERROR_ACCESS_DENIED;

    /* If the MPQ is changed at this moment, we have to flush the archive */
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_CHANGED))
    {
        SFileFlushArchive(hMpq);
    }

    /* Create the table with file keys */
    if(nError == ERROR_SUCCESS)
    {
        if((pFileKeys = STORM_ALLOC(uint32_t, ha->dwFileTableSize)) != NULL)
            memset(pFileKeys, 0, sizeof(uint32_t) * ha->dwFileTableSize);
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    /* First of all, we have to check if we are able to decrypt all files. */
    /* If not, sorry, but the archive cannot be compacted. */
    if(nError == ERROR_SUCCESS)
    {
        /* Initialize the progress variables for compact callback */
        FileStream_GetSize(ha->pStream, &(ha->CompactTotalBytes));
        ha->CompactBytesProcessed = 0;
        nError = CheckIfAllKeysKnown(ha, szListFile, pFileKeys);
    }

    /* Get the temporary file name and create it */
    if(nError == ERROR_SUCCESS)
    {
        strcpy(szTempFile, FileStream_GetFileName(ha->pStream));
        if((szTemp = strrchr(szTempFile, '.')) != NULL)
            strcpy(szTemp + 1, "mp_");
        else
            strcat(szTempFile, "_");

        pTempStream = FileStream_CreateFile(szTempFile, STREAM_PROVIDER_FLAT | BASE_PROVIDER_FILE);
        if(pTempStream == NULL)
            nError = GetLastError();
    }

    /* Write the data before MPQ user data (if any) */
    if(nError == ERROR_SUCCESS && ha->UserDataPos != 0)
    {
        /* Inform the application about the progress */
        if(ha->pfnCompactCB != NULL)
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COPYING_NON_MPQ_DATA, ha->CompactBytesProcessed, ha->CompactTotalBytes);

        ByteOffset = 0;
        ByteCount = ha->UserDataPos;
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, &ByteOffset, ByteCount);
    }

    /* Write the MPQ user data (if any) */
    if(nError == ERROR_SUCCESS && ha->MpqPos > ha->UserDataPos)
    {
        /* At this point, we assume that the user data size is equal */
        /* to pUserData->dwHeaderOffs. */
        /* If this assumption doesn't work, then we have an unknown version of MPQ */
        ByteOffset = ha->UserDataPos;
        ByteCount = ha->MpqPos - ha->UserDataPos;

        assert(ha->pUserData != NULL);
        assert(ha->pUserData->dwHeaderOffs == ByteCount);
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, &ByteOffset, ByteCount);
    }

    /* Write the MPQ header */
    if(nError == ERROR_SUCCESS)
    {
        TMPQHeader SaveMpqHeader;

        /* Write the MPQ header to the file */
        memcpy(&SaveMpqHeader, ha->pHeader, ha->pHeader->dwHeaderSize);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_1);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_2);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_3);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_4);
        if(!FileStream_Write(pTempStream, NULL, &SaveMpqHeader, ha->pHeader->dwHeaderSize))
            nError = GetLastError();

        /* Update the progress */
        ha->CompactBytesProcessed += ha->pHeader->dwHeaderSize;
    }

    /* Now copy all files */
    if(nError == ERROR_SUCCESS)
        nError = CopyMpqFiles(ha, pFileKeys, pTempStream);

    /* If succeeded, switch the streams */
    if(nError == ERROR_SUCCESS)
    {
        ha->dwFlags |= MPQ_FLAG_CHANGED;
        if(FileStream_Replace(ha->pStream, pTempStream))
            pTempStream = NULL;
        else
            nError = ERROR_CAN_NOT_COMPLETE;
    }

    /* Final user notification */
    if(nError == ERROR_SUCCESS && ha->pfnCompactCB != NULL)
    {
        ha->CompactBytesProcessed += (ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
        ha->CompactBytesProcessed += (ha->dwFileTableSize * sizeof(TMPQBlock));
        ha->pfnCompactCB(ha->pvCompactUserData, CCB_CLOSING_ARCHIVE, ha->CompactBytesProcessed, ha->CompactTotalBytes);
    }

    /* Cleanup and return */
    if(pTempStream != NULL)
        FileStream_Close(pTempStream);
    if(pFileKeys != NULL)
        STORM_FREE(pFileKeys);
    if(nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
bool WINAPI SFileCompactArchive(HANDLE hMpq, const char * szListFile, bool /* bReserved */)
{
    TFileStream * pTempStream = NULL;
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    ULONGLONG ByteOffset;
    ULONGLONG ByteCount;
    LPDWORD pFileKeys = NULL;
    TCHAR szTempFile[MAX_PATH] = _T("");
    TCHAR * szTemp = NULL;
    int nError = ERROR_SUCCESS;

    // Test the valid parameters
    if(!IsValidMpqHandle(hMpq))
        nError = ERROR_INVALID_HANDLE;
    if(ha->dwFlags & MPQ_FLAG_READ_ONLY)
        nError = ERROR_ACCESS_DENIED;

    // If the MPQ is changed at this moment, we have to flush the archive
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_CHANGED))
    {
        SFileFlushArchive(hMpq);
    }

    // Create the table with file keys
    if(nError == ERROR_SUCCESS)
    {
        if((pFileKeys = STORM_ALLOC(DWORD, ha->dwFileTableSize)) != NULL)
            memset(pFileKeys, 0, sizeof(DWORD) * ha->dwFileTableSize);
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // First of all, we have to check if we are able to decrypt all files.
    // If not, sorry, but the archive cannot be compacted.
    if(nError == ERROR_SUCCESS)
    {
        // Initialize the progress variables for compact callback
        FileStream_GetSize(ha->pStream, &(ha->CompactTotalBytes));
        ha->CompactBytesProcessed = 0;
        nError = CheckIfAllFilesKnown(ha, szListFile, pFileKeys);
    }

    // Get the temporary file name and create it
    if(nError == ERROR_SUCCESS)
    {
        _tcscpy(szTempFile, FileStream_GetFileName(ha->pStream));
        if((szTemp = _tcsrchr(szTempFile, '.')) != NULL)
            _tcscpy(szTemp + 1, _T("mp_"));
        else
            _tcscat(szTempFile, _T("_"));

        pTempStream = FileStream_CreateFile(szTempFile, STREAM_PROVIDER_FLAT | BASE_PROVIDER_FILE);
        if(pTempStream == NULL)
            nError = GetLastError();
    }

    // Write the data before MPQ user data (if any)
    if(nError == ERROR_SUCCESS && ha->UserDataPos != 0)
    {
        // Inform the application about the progress
        if(ha->pfnCompactCB != NULL)
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COPYING_NON_MPQ_DATA, ha->CompactBytesProcessed, ha->CompactTotalBytes);

        ByteOffset = 0;
        ByteCount = ha->UserDataPos;
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, ByteOffset, ByteCount);
    }

    // Write the MPQ user data (if any)
    if(nError == ERROR_SUCCESS && ha->MpqPos > ha->UserDataPos)
    {
        // At this point, we assume that the user data size is equal
        // to pUserData->dwHeaderOffs.
        // If this assumption doesn't work, then we have an unknown version of MPQ
        ByteOffset = ha->UserDataPos;
        ByteCount = ha->MpqPos - ha->UserDataPos;

        assert(ha->pUserData != NULL);
        assert(ha->pUserData->dwHeaderOffs == ByteCount);
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, ByteOffset, ByteCount);
    }

    // Write the MPQ header
    if(nError == ERROR_SUCCESS)
    {
        TMPQHeader SaveMpqHeader;

        // Write the MPQ header to the file
        memcpy(&SaveMpqHeader, ha->pHeader, ha->pHeader->dwHeaderSize);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_1);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_2);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_3);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_4);
        if(!FileStream_Write(pTempStream, NULL, &SaveMpqHeader, ha->pHeader->dwHeaderSize))
            nError = GetLastError();

        // Update the progress
        ha->CompactBytesProcessed += ha->pHeader->dwHeaderSize;
    }

    // Now copy all files
    if(nError == ERROR_SUCCESS)
        nError = CopyMpqFiles(ha, pFileKeys, pTempStream);

    // Defragment the file table
    if(nError == ERROR_SUCCESS)
        nError = RebuildFileTable(ha, ha->pHeader->dwHashTableSize, ha->dwMaxFileCount);

    // We also need to rebuild the HET table, if any
    if(nError == ERROR_SUCCESS)
    {
        // Invalidate (listfile) and (attributes)
        InvalidateInternalFiles(ha);

        // Rebuild the HET table, if we have any
        if(ha->pHetTable != NULL)
            nError = RebuildHetTable(ha);
    }

    // If succeeded, switch the streams
    if(nError == ERROR_SUCCESS)
    {
        if(FileStream_Replace(ha->pStream, pTempStream))
            pTempStream = NULL;
        else
            nError = ERROR_CAN_NOT_COMPLETE;
    }

    // If all succeeded, save the MPQ tables
    if(nError == ERROR_SUCCESS)
    {
        //
        // Note: We don't recalculate position of the MPQ tables at this point.
        // SaveMPQTables does it automatically.
        // 

        nError = SaveMPQTables(ha);
        if(nError == ERROR_SUCCESS && ha->pfnCompactCB != NULL)
        {
            ha->CompactBytesProcessed += (ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
            ha->CompactBytesProcessed += (ha->pHeader->dwBlockTableSize * sizeof(TMPQBlock));
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_CLOSING_ARCHIVE, ha->CompactBytesProcessed, ha->CompactTotalBytes);
        }
    }

    // Cleanup and return
    if(pTempStream != NULL)
        FileStream_Close(pTempStream);
    if(pFileKeys != NULL)
        STORM_FREE(pFileKeys);
    if(nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
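// Usage sketch (illustration only): compact an archive that was opened for
// writing. Passing NULL as the listfile typically means only the internal
// (listfile) is used to resolve the file names needed for re-encryption.
static bool CompactExampleArchive(HANDLE hMpq)
{
    // The archive must not be read-only and all file keys must be resolvable,
    // otherwise SFileCompactArchive fails as checked above.
    return SFileCompactArchive(hMpq, NULL, false);
}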
static int RecryptFileData(
    TMPQArchive * ha,
    TMPQFile * hf,
    const char * szFileName,
    const char * szNewFileName)
{
    ULONGLONG RawFilePos;
    TFileEntry * pFileEntry = hf->pFileEntry;
    DWORD dwBytesToRecrypt = pFileEntry->dwCmpSize;
    DWORD dwOldKey;
    DWORD dwNewKey;
    int nError = ERROR_SUCCESS;

    // The file must be encrypted
    assert(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED);

    // File decryption key is calculated from the plain name
    szNewFileName = GetPlainFileNameA(szNewFileName);
    szFileName = GetPlainFileNameA(szFileName);

    // Calculate both file keys
    dwOldKey = DecryptFileKey(szFileName,    pFileEntry->ByteOffset, pFileEntry->dwFileSize, pFileEntry->dwFlags);
    dwNewKey = DecryptFileKey(szNewFileName, pFileEntry->ByteOffset, pFileEntry->dwFileSize, pFileEntry->dwFlags);

    // In case the keys are equal, there is no need to recrypt the file
    if(dwNewKey == dwOldKey)
        return ERROR_SUCCESS;
    hf->dwFileKey = dwOldKey;

    // Calculate the raw position of the file in the archive
    hf->MpqFilePos = pFileEntry->ByteOffset;
    hf->RawFilePos = ha->MpqPos + hf->MpqFilePos;

    // Allocate buffer for file transfer
    nError = AllocateSectorBuffer(hf);
    if(nError != ERROR_SUCCESS)
        return nError;

    // Also allocate buffer for sector offsets
    // Note: Don't load sector checksums, we don't need to recrypt them
    nError = AllocateSectorOffsets(hf, true);
    if(nError != ERROR_SUCCESS)
        return nError;

    // If we have sector offsets, recrypt these as well
    if(hf->SectorOffsets != NULL)
    {
        // Allocate a secondary buffer for a copy of the sector offsets
        DWORD * SectorOffsetsCopy = (DWORD *)STORM_ALLOC(BYTE, hf->SectorOffsets[0]);
        DWORD dwSectorOffsLen = hf->SectorOffsets[0];

        if(SectorOffsetsCopy == NULL)
            return ERROR_NOT_ENOUGH_MEMORY;

        // Recrypt the array of sector offsets
        memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorOffsLen);
        EncryptMpqBlock(SectorOffsetsCopy, dwSectorOffsLen, dwNewKey - 1);
        BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorOffsLen);

        // Write the recrypted array back
        if(!FileStream_Write(ha->pStream, &hf->RawFilePos, SectorOffsetsCopy, dwSectorOffsLen))
            nError = GetLastError();
        STORM_FREE(SectorOffsetsCopy);
    }

    // Now we have to recrypt all file sectors. We do it without
    // recompression, because recompression is not necessary in this case
    if(nError == ERROR_SUCCESS)
    {
        for(DWORD dwSector = 0; dwSector < hf->dwSectorCount; dwSector++)
        {
            DWORD dwRawDataInSector = hf->dwSectorSize;
            DWORD dwRawByteOffset = dwSector * hf->dwSectorSize;

            // Last sector: if there are not enough bytes remaining in the file, cut the raw size
            if(dwRawDataInSector > dwBytesToRecrypt)
                dwRawDataInSector = dwBytesToRecrypt;

            // Fix the raw data length if the file is compressed
            if(hf->SectorOffsets != NULL)
            {
                dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
                dwRawByteOffset = hf->SectorOffsets[dwSector];
            }

            // Calculate the raw file offset of the file sector
            CalculateRawSectorOffset(RawFilePos, hf, dwRawByteOffset);

            // Read the file sector
            if(!FileStream_Read(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // Re-encrypt the sector with the new key.
            // Note: Recompression is not necessary here. Unlike encryption,
            // the compression does not depend on the position of the file in the MPQ.
            BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
            DecryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwOldKey + dwSector);
            EncryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwNewKey + dwSector);
            BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);

            // Write the sector back
            if(!FileStream_Write(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // Decrement number of bytes remaining
            dwBytesToRecrypt -= hf->dwSectorSize;
        }
    }

    return nError;
}
static bool CalculateMpqHashMd5(
    TMPQArchive * ha,
    PMPQ_SIGNATURE_INFO pSI,
    LPBYTE pMd5Digest)
{
    hash_state md5_state;
    ULONGLONG BeginBuffer;
    ULONGLONG EndBuffer;
    LPBYTE pbDigestBuffer = NULL;

    // Allocate buffer for creating the MPQ digest.
    pbDigestBuffer = STORM_ALLOC(BYTE, MPQ_DIGEST_UNIT_SIZE);
    if(pbDigestBuffer == NULL)
        return false;

    // Initialize the MD5 hash state
    md5_init(&md5_state);

    // Set the byte offset of the beginning of the data
    BeginBuffer = pSI->BeginMpqData;

    // Create the digest
    for(;;)
    {
        ULONGLONG BytesRemaining;
        LPBYTE pbSigBegin = NULL;
        LPBYTE pbSigEnd = NULL;
        DWORD dwToRead = MPQ_DIGEST_UNIT_SIZE;

        // Check the number of bytes remaining
        BytesRemaining = pSI->EndMpqData - BeginBuffer;
        if(BytesRemaining < MPQ_DIGEST_UNIT_SIZE)
            dwToRead = (DWORD)BytesRemaining;
        if(dwToRead == 0)
            break;

        // Read the next chunk 
        if(!FileStream_Read(ha->pStream, &BeginBuffer, pbDigestBuffer, dwToRead))
        {
            STORM_FREE(pbDigestBuffer);
            return false;
        }

        // Compute the end offset of the loaded chunk
        EndBuffer = BeginBuffer + dwToRead;

        // Check if the signature is within the loaded digest
        if(BeginBuffer <= pSI->BeginExclude && pSI->BeginExclude < EndBuffer)
            pbSigBegin = pbDigestBuffer + (size_t)(pSI->BeginExclude - BeginBuffer);
        if(BeginBuffer <= pSI->EndExclude && pSI->EndExclude < EndBuffer)
            pbSigEnd = pbDigestBuffer + (size_t)(pSI->EndExclude - BeginBuffer);

        // Zero the part that belongs to the signature
        if(pbSigBegin != NULL || pbSigEnd != NULL)
        {
            if(pbSigBegin == NULL)
                pbSigBegin = pbDigestBuffer;
            if(pbSigEnd == NULL)
                pbSigEnd = pbDigestBuffer + dwToRead;

            memset(pbSigBegin, 0, (pbSigEnd - pbSigBegin));
        }

        // Pass the buffer to the hashing function
        md5_process(&md5_state, pbDigestBuffer, dwToRead);

        // Move pointers
        BeginBuffer += dwToRead;
    }

    // Finalize the MD5 hash
    md5_done(&md5_state, pMd5Digest);
    STORM_FREE(pbDigestBuffer);
    return true;
}
static int WriteDataToMpqFile(
    TMPQArchive * ha,
    TMPQFile * hf,
    LPBYTE pbFileData,
    DWORD dwDataSize,
    DWORD dwCompression)
{
    TFileEntry * pFileEntry = hf->pFileEntry;
    ULONGLONG ByteOffset;
    LPBYTE pbCompressed = NULL;         // Compressed (target) data
    LPBYTE pbToWrite = NULL;            // Data to write to the file
    int nCompressionLevel = -1;         // ADPCM compression level (only used for wave files)
    int nError = ERROR_SUCCESS;

    // If the caller wants ADPCM compression, we will set wave compression level to 4,
    // which corresponds to medium quality
    if(dwCompression & LOSSY_COMPRESSION_MASK)
        nCompressionLevel = 4;

    // Make sure that the caller won't overrun the previously declared file size
    assert(hf->dwFilePos + dwDataSize <= pFileEntry->dwFileSize);
    assert(hf->dwSectorCount != 0);
    assert(hf->pbFileSector != NULL);
    if((hf->dwFilePos + dwDataSize) > pFileEntry->dwFileSize)
        return ERROR_DISK_FULL;
    pbToWrite = hf->pbFileSector;

    // Now write all data to the file sector buffer
    if(nError == ERROR_SUCCESS)
    {
        DWORD dwBytesInSector = hf->dwFilePos % hf->dwSectorSize;
        DWORD dwSectorIndex = hf->dwFilePos / hf->dwSectorSize;
        DWORD dwBytesToCopy;

        // Process all data. 
        while(dwDataSize != 0)
        {
            dwBytesToCopy = dwDataSize;
                
            // Check for sector overflow
            if(dwBytesToCopy > (hf->dwSectorSize - dwBytesInSector))
                dwBytesToCopy = (hf->dwSectorSize - dwBytesInSector);

            // Copy the data to the file sector
            memcpy(hf->pbFileSector + dwBytesInSector, pbFileData, dwBytesToCopy);
            dwBytesInSector += dwBytesToCopy;
            pbFileData += dwBytesToCopy;
            dwDataSize -= dwBytesToCopy;

            // Update the file position
            hf->dwFilePos += dwBytesToCopy;

            // If the current sector is full, or if the file is already full,
            // then write the data to the MPQ
            if(dwBytesInSector >= hf->dwSectorSize || hf->dwFilePos >= pFileEntry->dwFileSize)
            {
                // Set the position in the file
                ByteOffset = hf->RawFilePos + pFileEntry->dwCmpSize;

                // Update CRC32 and MD5 of the file
                md5_process((hash_state *)hf->hctx, hf->pbFileSector, dwBytesInSector);
                hf->dwCrc32 = crc32(hf->dwCrc32, hf->pbFileSector, dwBytesInSector);

                // Compress the file sector, if needed
                if(pFileEntry->dwFlags & MPQ_FILE_COMPRESSED)
                {
                    int nOutBuffer = (int)dwBytesInSector;
                    int nInBuffer = (int)dwBytesInSector;

                    // If the file is compressed, allocate a buffer for the compressed data.
                    // Note that we allocate a buffer a bit larger than the sector size,
                    // in case the compression method overruns the output buffer
                    if(pbCompressed == NULL)
                    {
                        pbToWrite = pbCompressed = STORM_ALLOC(BYTE, hf->dwSectorSize + 0x100);
                        if(pbCompressed == NULL)
                        {
                            nError = ERROR_NOT_ENOUGH_MEMORY;
                            break;
                        }
                    }

                    //
                    // Note that both SCompImplode and SCompCompress return the original buffer
                    // if they are unable to compress the data.
                    //

                    if(pFileEntry->dwFlags & MPQ_FILE_IMPLODE)
                    {
                        SCompImplode((char *)pbCompressed,
                                            &nOutBuffer,
                                     (char *)hf->pbFileSector,
                                             nInBuffer);
                    }

                    if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS)
                    {
                        SCompCompress((char *)pbCompressed,
                                             &nOutBuffer,
                                      (char *)hf->pbFileSector,
                                              nInBuffer,
                                    (unsigned)dwCompression,
                                              0,
                                              nCompressionLevel);
                    }

                    // Update sector positions
                    dwBytesInSector = nOutBuffer;
                    if(hf->SectorOffsets != NULL)
                        hf->SectorOffsets[dwSectorIndex+1] = hf->SectorOffsets[dwSectorIndex] + dwBytesInSector;

                    // We have to calculate sector CRC, if enabled
                    if(hf->SectorChksums != NULL)
                        hf->SectorChksums[dwSectorIndex] = adler32(0, pbCompressed, nOutBuffer);
                }                 

                // Encrypt the sector, if necessary
                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
                {
                    BSWAP_ARRAY32_UNSIGNED(pbToWrite, dwBytesInSector);
                    EncryptMpqBlock(pbToWrite, dwBytesInSector, hf->dwFileKey + dwSectorIndex);
                    BSWAP_ARRAY32_UNSIGNED(pbToWrite, dwBytesInSector);
                }

                // Write the file sector
                if(!FileStream_Write(ha->pStream, &ByteOffset, pbToWrite, dwBytesInSector))
                {
                    nError = GetLastError();
                    break;
                }

                // Call the add-file callback, if any
                if(AddFileCB != NULL)
                    AddFileCB(pvUserData, hf->dwFilePos, hf->dwDataSize, false);

                // Update the compressed file size
                pFileEntry->dwCmpSize += dwBytesInSector;
                dwBytesInSector = 0;
                dwSectorIndex++;
            }
        }
    }

    // Cleanup
    if(pbCompressed != NULL)
        STORM_FREE(pbCompressed);
    return nError;
}
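// Usage sketch (illustration only): the public write flow that feeds
// WriteDataToMpqFile above. SFileCreateFile, SFileWriteFile and SFileFinishFile
// are the same calls used by SFileAddFileEx earlier in this file; the archived
// name and data are hypothetical.
static bool WriteMemoryBlockExample(HANDLE hMpq, LPBYTE pbData, DWORD cbData)
{
    HANDLE hFile = NULL;

    if(!SFileCreateFile(hMpq, "data\\example.bin", 0, cbData, LANG_NEUTRAL, MPQ_FILE_COMPRESS | MPQ_FILE_REPLACEEXISTING, &hFile))
        return false;

    // The whole buffer may be passed at once; it is split into sectors internally
    if(!SFileWriteFile(hFile, pbData, cbData, MPQ_COMPRESSION_PKWARE))
    {
        SFileFinishFile(hFile);
        return false;
    }

    return SFileFinishFile(hFile);
}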
static int VerifyRawMpqData(
    TMPQArchive * ha,
    ULONGLONG ByteOffset,
    DWORD dwDataSize)
{
    ULONGLONG DataOffset = ha->MpqPos + ByteOffset;
    LPBYTE pbDataChunk;
    LPBYTE pbMD5Array1;                 // Calculated MD5 array
    LPBYTE pbMD5Array2;                 // MD5 array loaded from the MPQ
    DWORD dwBytesInChunk;
    DWORD dwChunkCount;
    DWORD dwChunkSize = ha->pHeader->dwRawChunkSize;
    DWORD dwMD5Size;
    int nError = ERROR_SUCCESS;

    // Don't verify zero-sized blocks
    if(dwDataSize == 0)
        return ERROR_SUCCESS;

    // Get the number of data chunks to calculate MD5
    assert(dwChunkSize != 0);
    dwChunkCount = ((dwDataSize - 1) / dwChunkSize) + 1;
    dwMD5Size = dwChunkCount * MD5_DIGEST_SIZE;

    // Allocate space for data chunk and for the MD5 array
    pbDataChunk = STORM_ALLOC(BYTE, dwChunkSize);
    if(pbDataChunk == NULL)
        return ERROR_NOT_ENOUGH_MEMORY;

    // Allocate space for MD5 array
    pbMD5Array1 = STORM_ALLOC(BYTE, dwMD5Size);
    pbMD5Array2 = STORM_ALLOC(BYTE, dwMD5Size);
    if(pbMD5Array1 == NULL || pbMD5Array2 == NULL)
        nError = ERROR_NOT_ENOUGH_MEMORY;

    // Calculate MD5 of each data chunk
    if(nError == ERROR_SUCCESS)
    {
        LPBYTE pbMD5 = pbMD5Array1;

        for(DWORD i = 0; i < dwChunkCount; i++)
        {
            // Get the number of bytes in the chunk
            dwBytesInChunk = STORMLIB_MIN(dwChunkSize, dwDataSize);

            // Read the data chunk
            if(!FileStream_Read(ha->pStream, &DataOffset, pbDataChunk, dwBytesInChunk))
            {
                nError = ERROR_FILE_CORRUPT;
                break;
            }

            // Calculate MD5
            CalculateDataBlockHash(pbDataChunk, dwBytesInChunk, pbMD5);

            // Move pointers and offsets
            DataOffset += dwBytesInChunk;
            dwDataSize -= dwBytesInChunk;
            pbMD5 += MD5_DIGEST_SIZE;
        }
    }

    // Read the MD5 array
    if(nError == ERROR_SUCCESS)
    {
        // Read the array of MD5
        if(!FileStream_Read(ha->pStream, &DataOffset, pbMD5Array2, dwMD5Size))
            nError = GetLastError();
    }

    // Compare the array of MD5
    if(nError == ERROR_SUCCESS)
    {
        // Compare the MD5
        if(memcmp(pbMD5Array1, pbMD5Array2, dwMD5Size))
            nError = ERROR_FILE_CORRUPT;
    }

    // Free memory and return result
    if(pbMD5Array2 != NULL)
        STORM_FREE(pbMD5Array2);
    if(pbMD5Array1 != NULL)
        STORM_FREE(pbMD5Array1);
    if(pbDataChunk != NULL)
        STORM_FREE(pbDataChunk);
    return nError;
}
static void * LZMA_Callback_Alloc(void *p, size_t size)
{
    p = p;                              // Reference the unused parameter to avoid a compiler warning
    return STORM_ALLOC(BYTE, size);
}
static int ApplyMpqPatch_BSD0(
    TMPQFile * hf,
    TPatchHeader * pPatchHeader)
{
    PBLIZZARD_BSDIFF40_FILE pBsdiff;
    LPDWORD pCtrlBlock;
    LPBYTE pbPatchData = (LPBYTE)pPatchHeader + sizeof(TPatchHeader);
    LPBYTE pDataBlock;
    LPBYTE pExtraBlock;
    LPBYTE pbNewData = NULL;
    LPBYTE pbOldData = (LPBYTE)hf->pbFileData;
    DWORD dwNewOffset = 0;                          // Current position to patch
    DWORD dwOldOffset = 0;                          // Current source position
    DWORD dwNewSize;                                // Patched file size
    DWORD dwOldSize = hf->cbFileData;               // File size before patch

    // Get pointer to the patch header
    // Format of BSDIFF header corresponds to original BSDIFF, which is:
    // 0000   8 bytes   signature "BSDIFF40"
    // 0008   8 bytes   size of the control block
    // 0010   8 bytes   size of the data block
    // 0018   8 bytes   new size of the patched file
    pBsdiff = (PBLIZZARD_BSDIFF40_FILE)pbPatchData;
    pbPatchData += sizeof(BLIZZARD_BSDIFF40_FILE);

    // Get pointer to the 32-bit BSDIFF control block
    // The control block follows immediately after the BSDIFF header
    // and consists of three 32-bit integers
    // 0000   4 bytes   Length to copy from the BSDIFF data block to the new file
    // 0004   4 bytes   Length to copy from the BSDIFF extra block
    // 0008   4 bytes   Size to increment source file offset
    pCtrlBlock = (LPDWORD)pbPatchData;
    pbPatchData += (size_t)BSWAP_INT64_UNSIGNED(pBsdiff->CtrlBlockSize);

    // Get the pointer to the data block
    pDataBlock = (LPBYTE)pbPatchData;
    pbPatchData += (size_t)BSWAP_INT64_UNSIGNED(pBsdiff->DataBlockSize);

    // Get the pointer to the extra block
    pExtraBlock = (LPBYTE)pbPatchData;
    dwNewSize = (DWORD)BSWAP_INT64_UNSIGNED(pBsdiff->NewFileSize);

    // Allocate new buffer
    pbNewData = STORM_ALLOC(BYTE, dwNewSize);
    if(pbNewData == NULL)
        return ERROR_NOT_ENOUGH_MEMORY;

    // Now patch the file
    while(dwNewOffset < dwNewSize)
    {
        DWORD dwAddDataLength = BSWAP_INT32_UNSIGNED(pCtrlBlock[0]);
        DWORD dwMovDataLength = BSWAP_INT32_UNSIGNED(pCtrlBlock[1]);
        DWORD dwOldMoveLength = BSWAP_INT32_UNSIGNED(pCtrlBlock[2]);
        DWORD i;

        // Sanity check
        if((dwNewOffset + dwAddDataLength) > dwNewSize)
        {
            STORM_FREE(pbNewData);
            return ERROR_FILE_CORRUPT;
        }

        // Read the diff string to the target buffer
        memcpy(pbNewData + dwNewOffset, pDataBlock, dwAddDataLength);
        pDataBlock += dwAddDataLength;

        // Now combine the patch data with the original file
        for(i = 0; i < dwAddDataLength; i++)
        {
            if(dwOldOffset < dwOldSize)
                pbNewData[dwNewOffset] = pbNewData[dwNewOffset] + pbOldData[dwOldOffset];

            dwNewOffset++;
            dwOldOffset++;
        }

        // Sanity check
        if((dwNewOffset + dwMovDataLength) > dwNewSize)
        {
            STORM_FREE(pbNewData);
            return ERROR_FILE_CORRUPT;
        }

        // Copy the data from the extra block in BSDIFF patch
        memcpy(pbNewData + dwNewOffset, pExtraBlock, dwMovDataLength);
        pExtraBlock += dwMovDataLength;
        dwNewOffset += dwMovDataLength;

        // Move the old offset
        if(dwOldMoveLength & 0x80000000)
            dwOldMoveLength = 0x80000000 - dwOldMoveLength;
        dwOldOffset += dwOldMoveLength;
        pCtrlBlock += 3;
    }

    // Free the old file data
    STORM_FREE(hf->pbFileData);

    // Put the new data into the file structure
    hf->pbFileData = pbNewData;
    hf->cbFileData = dwNewSize;
    return ERROR_SUCCESS;
}
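// Reconstruction for illustration: the BSDIFF40 header layout described in the
// comments above. The actual declaration lives elsewhere in StormLib; the
// "Signature" field name is an assumption, the other names are taken from the
// code above.
typedef struct _EXAMPLE_BSDIFF40_HEADER
{
    ULONGLONG Signature;            // "BSDIFF40"
    ULONGLONG CtrlBlockSize;        // Size of the control block, in bytes
    ULONGLONG DataBlockSize;        // Size of the data block, in bytes
    ULONGLONG NewFileSize;          // Size of the patched file, in bytes
} EXAMPLE_BSDIFF40_HEADER;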
bool WINAPI SFileOpenArchive(
    const TCHAR * szMpqName,
    DWORD dwPriority,
    DWORD dwFlags,
    HANDLE * phMpq)
{
    TFileStream * pStream = NULL;       // Open file stream
    TMPQArchive * ha = NULL;            // Archive handle
    ULONGLONG FileSize = 0;             // Size of the file
    int nError = ERROR_SUCCESS;   

    // Verify the parameters
    if(szMpqName == NULL || *szMpqName == 0 || phMpq == NULL)
        nError = ERROR_INVALID_PARAMETER;

    // One time initialization of MPQ cryptography
    InitializeMpqCryptography();
    dwPriority = dwPriority;                // The priority parameter is not used; the self-assignment suppresses compiler warnings

    // Open the MPQ archive file
    if(nError == ERROR_SUCCESS)
    {
        if(!(dwFlags & MPQ_OPEN_ENCRYPTED))
        {
            pStream = FileStream_OpenFile(szMpqName, (dwFlags & MPQ_OPEN_READ_ONLY) ? false : true);
            if(pStream == NULL)
                nError = GetLastError();
        }
        else
        {
            pStream = FileStream_OpenEncrypted(szMpqName);
            if(pStream == NULL)
                nError = GetLastError();
        }
    }
    
    // Allocate the MPQ archive handle
    if(nError == ERROR_SUCCESS)
    {
        FileStream_GetSize(pStream, FileSize);
        if((ha = STORM_ALLOC(TMPQArchive, 1)) == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Initialize handle structure and allocate structure for MPQ header
    if(nError == ERROR_SUCCESS)
    {
        memset(ha, 0, sizeof(TMPQArchive));
        ha->pStream = pStream;
        pStream = NULL;

        // Remember if the archive is read-only (encrypted streams are always read-only)
        if(ha->pStream->StreamFlags & (STREAM_FLAG_READ_ONLY | STREAM_FLAG_ENCRYPTED_FILE))
            ha->dwFlags |= MPQ_FLAG_READ_ONLY;

        // Also remember if we shall check sector CRCs when reading files
        if(dwFlags & MPQ_OPEN_CHECK_SECTOR_CRC)
            ha->dwFlags |= MPQ_FLAG_CHECK_SECTOR_CRC;
    }

    // Find the offset of the MPQ header within the file
    if(nError == ERROR_SUCCESS)
    {
        ULONGLONG SearchPos = 0;
        DWORD dwHeaderID;

        while(SearchPos < FileSize)
        {
            DWORD dwBytesAvailable = MPQ_HEADER_SIZE_V4;

            // Cut the bytes available, if needed
            if((FileSize - SearchPos) < MPQ_HEADER_SIZE_V4)
                dwBytesAvailable = (DWORD)(FileSize - SearchPos);

            // Read the data that might be an MPQ header
            if(!FileStream_Read(ha->pStream, &SearchPos, ha->HeaderData, dwBytesAvailable))
            {
                nError = GetLastError();
                break;
            }

            // There are AVI files from Warcraft III that have the 'MPQ' extension.
            if(SearchPos == 0 && IsAviFile(ha->HeaderData))
            {
                nError = ERROR_AVI_FILE;
                break;
            }

            // If there is the MPQ user data signature, process it
            dwHeaderID = BSWAP_INT32_UNSIGNED(*(LPDWORD)ha->HeaderData);
            if(dwHeaderID == ID_MPQ_USERDATA && ha->pUserData == NULL)
            {
                // Ignore the MPQ user data completely if the caller wants to open the MPQ as V1.0
                if((dwFlags & MPQ_OPEN_FORCE_MPQ_V1) == 0)
                {
                    // Fill the user data header
                    ha->pUserData = &ha->UserData;
                    memcpy(ha->pUserData, ha->HeaderData, sizeof(TMPQUserData));
                    BSWAP_TMPQUSERDATA(ha->pUserData);

                    // Remember the position of the user data and continue search
                    ha->UserDataPos = SearchPos;
                    SearchPos += ha->pUserData->dwHeaderOffs;
                    continue;
                }
            }

            // Check for the MPQ header signature
            if(dwHeaderID == ID_MPQ)
            {
                // Save the position where the MPQ header has been found
                if(ha->pUserData == NULL)
                    ha->UserDataPos = SearchPos;
                ha->pHeader = (TMPQHeader *)ha->HeaderData;
                ha->MpqPos = SearchPos;

                // Now convert the header to version 4
                BSWAP_TMPQHEADER(ha->pHeader);
                nError = ConvertMpqHeaderToFormat4(ha, FileSize, dwFlags);
                break;
            }

            // Move to the next possible offset
            SearchPos += 0x200;
        }

        // If we haven't found an MPQ header in the file, it's an error
        if(ha->pHeader == NULL)
            nError = ERROR_BAD_FORMAT;
    }
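
    // For illustration (assumed layout, not taken from a specific file): if an
    // archive is preceded by 0x200 bytes of custom data, the loop above misses
    // the signature at offset 0 and finds ID_MPQ on the next 0x200-aligned probe.
    // If it finds an MPQ user data block (ID_MPQ_USERDATA) instead, it jumps
    // straight to SearchPos + dwHeaderOffs and re-checks the signature there.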

    // Fix table positions according to format
    if(nError == ERROR_SUCCESS)
    {
        // Dump the header
//      DumpMpqHeader(ha->pHeader);

        // W3x map protectors exploit the fact that War3's Storm.dll ignores the MPQ user data,
        // and probably ignores the MPQ format version as well. The trick is to fake
        // MPQ format 2 with an improper hi-word position of the hash table and block table.
        // We can overcome such protectors by forcing the archive open as MPQ v 1.0.
        if(dwFlags & MPQ_OPEN_FORCE_MPQ_V1)
        {
            ha->pHeader->wFormatVersion = MPQ_FORMAT_VERSION_1;
            ha->pHeader->dwHeaderSize = MPQ_HEADER_SIZE_V1;
            ha->dwFlags |= MPQ_FLAG_READ_ONLY;
            ha->pUserData = NULL;
        }

        // Either MPQ_OPEN_NO_LISTFILE or MPQ_OPEN_NO_ATTRIBUTES triggers read-only mode
        if(dwFlags & (MPQ_OPEN_NO_LISTFILE | MPQ_OPEN_NO_ATTRIBUTES))
            ha->dwFlags |= MPQ_FLAG_READ_ONLY;

        // Set the default file flags for (listfile) and (attributes)
        ha->dwFileFlags1 =
        ha->dwFileFlags2 = MPQ_FILE_ENCRYPTED | MPQ_FILE_COMPRESS | MPQ_FILE_REPLACEEXISTING;

        // Set the size of the file sector (e.g. wSectorSize == 3 gives 0x200 << 3 = 0x1000 bytes)
        ha->dwSectorSize = (0x200 << ha->pHeader->wSectorSize);

        // Verify that none of the tables starts beyond the end of the file
        nError = VerifyMpqTablePositions(ha, FileSize);
    }

    // Read the hash table and/or the HET table, whichever is present in the archive
    if(nError == ERROR_SUCCESS)
    {
        nError = LoadAnyHashTable(ha);
    }

    // Now build the file table by combining the block table,
    // BET table, hi-block table, (attributes) and (listfile).
    if(nError == ERROR_SUCCESS)
    {
        nError = BuildFileTable(ha, FileSize);
    }

    // Verify the file table, if no kind of protection was detected
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_PROTECTED) == 0)
    {
        TFileEntry * pFileTableEnd = ha->pFileTable + ha->pHeader->dwBlockTableSize;
        TFileEntry * pFileEntry = ha->pFileTable;
//      ULONGLONG ArchiveSize = 0;
        ULONGLONG RawFilePos;

        // Parse all file entries
        for(pFileEntry = ha->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
        {
            // If that file entry is valid, check the file position
            if(pFileEntry->dwFlags & MPQ_FILE_EXISTS)
            {
                // Get the 64-bit file position,
                // relative to the beginning of the file
                RawFilePos = ha->MpqPos + pFileEntry->ByteOffset;

                // The beginning of the file data must be within range
                if(RawFilePos > FileSize)
                {
                    nError = ERROR_FILE_CORRUPT;
                    break;
                }

                // The end of the file data must be within range as well
                RawFilePos += pFileEntry->dwCmpSize;
                if(RawFilePos > FileSize)
                {
                    nError = ERROR_FILE_CORRUPT;
                    break;
                }

                // Also, we remember end of the file
//              if(RawFilePos > ArchiveSize)
//                  ArchiveSize = RawFilePos;
            }
        }
    }
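
    // In effect, when no protector was detected, the loop above enforces
    //     MpqPos + ByteOffset + dwCmpSize <= FileSize
    // for every entry that has MPQ_FILE_EXISTS set.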

    // Load the internal listfile and merge it into the file table
    if(nError == ERROR_SUCCESS && (dwFlags & MPQ_OPEN_NO_LISTFILE) == 0)
    {
        // Ignore the result of the operation; the (listfile) is optional.
        SFileAddListFile((HANDLE)ha, NULL);
    }

    // Load the "(attributes)" file and merge it into the file table
    if(nError == ERROR_SUCCESS && (dwFlags & MPQ_OPEN_NO_ATTRIBUTES) == 0)
    {
        // Ignore the result of the operation; the (attributes) file is optional.
        SAttrLoadAttributes(ha);
    }

    // Cleanup and exit
    if(nError != ERROR_SUCCESS)
    {
        FileStream_Close(pStream);
        FreeMPQArchive(ha);
        SetLastError(nError);
        ha = NULL;
    }

    *phMpq = ha;
    return (nError == ERROR_SUCCESS);
}
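
// A minimal usage sketch for SFileOpenArchive (illustrative only; the example
// function name is hypothetical and the block is kept out of the build):
#if 0
static bool OpenArchiveReadOnlyExample(const TCHAR * szArchiveName)
{
    HANDLE hMpq = NULL;

    // Open an existing archive for read-only access (the priority parameter is unused, pass 0)
    if(!SFileOpenArchive(szArchiveName, 0, MPQ_OPEN_READ_ONLY, &hMpq))
        return false;               // GetLastError() holds the reason, e.g. ERROR_BAD_FORMAT

    // ... read files with SFileOpenFileEx / SFileReadFile here ...

    // Always close the handle returned by SFileOpenArchive
    SFileCloseArchive(hMpq);
    return true;
}
#endif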