Code Example #1
static int LoadFilePatch_BSD0(TMPQFile * hf, PMPQ_PATCH_HEADER pFullPatch)
{
    unsigned char * pbDecompressed = (unsigned char *)(pFullPatch + 1);
    unsigned char * pbCompressed = NULL;
    size_t cbDecompressed = 0;
    size_t cbCompressed = 0;
    size_t dwBytesRead = 0;
    int nError = ERROR_SUCCESS;

    /* Calculate the size of compressed data */
    cbDecompressed = pFullPatch->dwSizeOfPatchData - sizeof(MPQ_PATCH_HEADER);
    cbCompressed = pFullPatch->dwXfrmBlockSize - SIZE_OF_XFRM_HEADER;

    /* Is that file compressed? */
    if(cbCompressed < cbDecompressed)
    {
        pbCompressed = STORM_ALLOC(uint8_t, cbCompressed);
        if(pbCompressed == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;

        /* Read the compressed patch data */
        if(nError == ERROR_SUCCESS)
        {
            SFileReadFile((void *)hf, pbCompressed, cbCompressed, &dwBytesRead);
            if(dwBytesRead != cbCompressed)
                nError = ERROR_FILE_CORRUPT;
        }

        /* Decompress the data */
        if(nError == ERROR_SUCCESS)
            Decompress_RLE(pbDecompressed, cbDecompressed, pbCompressed, cbCompressed);

        if(pbCompressed != NULL)
            STORM_FREE(pbCompressed);
    }
    else
    {
        SFileReadFile((void *)hf, pbDecompressed, cbDecompressed, &dwBytesRead);
        if(dwBytesRead != cbDecompressed)
            nError = ERROR_FILE_CORRUPT;
    }

    return nError;
}
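
LoadFilePatch_BSD0 expects the caller to have already read the MPQ_PATCH_HEADER and to supply a buffer of dwSizeOfPatchData bytes with that header at its front. The following is a minimal, hedged sketch of such a caller, not the StormLib implementation; it reuses the internal names and the 4-argument SFileReadFile form from the snippet above and assumes the same internal headers.

static int LoadFilePatch_Sketch(TMPQFile * hf)
{
    MPQ_PATCH_HEADER PatchHeader;
    PMPQ_PATCH_HEADER pFullPatch;
    size_t dwBytesRead = 0;
    int nError;

    /* Read the fixed-size patch header from the start of the patch file */
    SFileReadFile((void *)hf, &PatchHeader, sizeof(MPQ_PATCH_HEADER), &dwBytesRead);
    if(dwBytesRead != sizeof(MPQ_PATCH_HEADER))
        return ERROR_FILE_CORRUPT;

    /* Allocate room for the header plus the decompressed patch data */
    pFullPatch = (PMPQ_PATCH_HEADER)STORM_ALLOC(uint8_t, PatchHeader.dwSizeOfPatchData);
    if(pFullPatch == NULL)
        return ERROR_NOT_ENOUGH_MEMORY;

    /* Keep the header at the front of the buffer; the BSD0 payload is loaded right after it */
    memcpy(pFullPatch, &PatchHeader, sizeof(MPQ_PATCH_HEADER));
    nError = LoadFilePatch_BSD0(hf, pFullPatch);

    if(nError != ERROR_SUCCESS)
        STORM_FREE(pFullPatch);
    return nError;
}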
Code Example #2
static bool FindPatchPrefix_SC2_MatchFiles(TMPQArchive * haBase, TMPQArchive * haPatch, TFileEntry * pBaseEntry)
{
    TMPQNamePrefix * pPatchPrefix;
    char * szPatchFileName;
    char * szPlainName;
    size_t cchWorkBuffer = 0x400;
    bool bResult = false;

    // First-level patches: Find the same file within the patch archive
    // and verify by MD5-before-patch
    if(haBase->haPatch == NULL)
    {
        TFileEntry * pFileTableEnd = haPatch->pFileTable + haPatch->dwFileTableSize;
        TFileEntry * pFileEntry;

        // Allocate working buffer for merging LST file
        szPatchFileName = STORM_ALLOC(char, cchWorkBuffer);
        if(szPatchFileName != NULL)
        {
            // Parse the entire file table
            for(pFileEntry = haPatch->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
            {
                // Look for "patch_metadata" file
                if(IsPatchMetadataFile(pFileEntry))
                {
                    // Construct the name of the MD5 file
                    strcpy(szPatchFileName, pFileEntry->szFileName);
                    szPlainName = (char *)GetPlainFileName(szPatchFileName);
                    strcpy(szPlainName, pBaseEntry->szFileName);

                    // Check for matching MD5 file
                    if(IsMatchingPatchFile(haPatch, szPatchFileName, pBaseEntry->md5))
                    {
                        bResult = CreatePatchPrefix(haPatch, szPatchFileName, (size_t)(szPlainName - szPatchFileName));
                        break;
                    }
                }
            }

            // Delete the merge buffer
            STORM_FREE(szPatchFileName);
        }
    }
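
The loop above rewrites the path of the "patch_metadata" entry in place: the directory prefix is kept and the plain file name is replaced with the base file's name, which also yields the prefix length passed to CreatePatchPrefix. Below is a self-contained sketch of that trick using only the C library; GetPlainName_Sketch and the sample paths are made up for illustration.

#include <stdio.h>
#include <string.h>

static const char * GetPlainName_Sketch(const char * szPath)
{
    const char * szSlash = strrchr(szPath, '\\');   /* MPQ paths use backslashes */
    return (szSlash != NULL) ? szSlash + 1 : szPath;
}

int main(void)
{
    char szPatchFileName[0x400];
    const char * szMetadata = "Campaigns\\Liberty.SC2Campaign\\base.SC2Data\\patch_metadata";
    const char * szBaseName = "GameData.xml";
    char * szPlainName;

    strcpy(szPatchFileName, szMetadata);
    szPlainName = (char *)GetPlainName_Sketch(szPatchFileName);
    strcpy(szPlainName, szBaseName);                /* overwrite "patch_metadata" with the base name */

    /* Prefix length for CreatePatchPrefix would be (szPlainName - szPatchFileName) */
    printf("%s\n", szPatchFileName);
    return 0;
}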
Code Example #3
File: SCompression.cpp Project: BattleNWar/YDWE
static void Compress_PKLIB(void * pvOutBuffer, int * pcbOutBuffer, void * pvInBuffer, int cbInBuffer, int * pCmpType, int nCmpLevel)
{
    TDataInfo Info;                                      // Data information
    char * work_buf = STORM_ALLOC(char, CMP_BUFFER_SIZE);// Pklib's work buffer
    unsigned int dict_size;                              // Dictionary size
    unsigned int ctype = CMP_BINARY;                     // Compression type

    // Keep compilers happy
    STORMLIB_UNUSED(pCmpType);
    STORMLIB_UNUSED(nCmpLevel);

    // Handle no-memory condition
    if(work_buf != NULL)
    {
        // Fill data information structure
        memset(work_buf, 0, CMP_BUFFER_SIZE);
        Info.pbInBuff     = (unsigned char *)pvInBuffer;
        Info.pbInBuffEnd  = (unsigned char *)pvInBuffer + cbInBuffer;
        Info.pbOutBuff    = (unsigned char *)pvOutBuffer;
        Info.pbOutBuffEnd = (unsigned char *)pvOutBuffer + *pcbOutBuffer;

        //
        // Set the dictionary size
        //
        // Diablo I uses a fixed dictionary size of CMP_IMPLODE_DICT_SIZE3
        // Starcraft I uses a variable dictionary size based on the algorithm below
        //

        if (cbInBuffer < 0x600)
            dict_size = CMP_IMPLODE_DICT_SIZE1;
        else if(0x600 <= cbInBuffer && cbInBuffer < 0xC00)
            dict_size = CMP_IMPLODE_DICT_SIZE2;
        else
            dict_size = CMP_IMPLODE_DICT_SIZE3;

        // Do the compression
        if(implode(ReadInputData, WriteOutputData, work_buf, &Info, &ctype, &dict_size) == CMP_NO_ERROR)
            *pcbOutBuffer = (int)(Info.pbOutBuff - (unsigned char *)pvOutBuffer);

        STORM_FREE(work_buf);
    }
}
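
The size thresholds above select one of the three PKWARE DCL dictionary sizes; in StormLib's pklib.h, CMP_IMPLODE_DICT_SIZE1/2/3 are 1024, 2048 and 4096 bytes. A standalone restatement of the rule:

static unsigned int GetImplodeDictSize(int cbInBuffer)
{
    if(cbInBuffer < 0x600)                  /* below 1536 bytes */
        return CMP_IMPLODE_DICT_SIZE1;      /* 1024-byte dictionary */
    if(cbInBuffer < 0xC00)                  /* below 3072 bytes */
        return CMP_IMPLODE_DICT_SIZE2;      /* 2048-byte dictionary */
    return CMP_IMPLODE_DICT_SIZE3;          /* 4096-byte dictionary */
}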
Code Example #4
static void Compress_PKLIB(
        char * pbOutBuffer,
        int * pcbOutBuffer,
        char * pbInBuffer,
        int cbInBuffer,
        int * /* pCmpType */,
        int /* nCmpLevel */)
{
    TDataInfo Info;                                      // Data information
    char * work_buf = STORM_ALLOC(char, CMP_BUFFER_SIZE);// Pklib's work buffer
    unsigned int dict_size;                              // Dictionary size
    unsigned int ctype = CMP_BINARY;                     // Compression type

    // Fill data information structure
    memset(work_buf, 0, CMP_BUFFER_SIZE);
    Info.pbInBuff     = pbInBuffer;
    Info.pbInBuffEnd  = pbInBuffer + cbInBuffer;
    Info.pbOutBuff    = pbOutBuffer;
    Info.pbOutBuffEnd = pbOutBuffer + *pcbOutBuffer;

    //
    // Set the dictionary size
    //
    // Diablo I uses a fixed dictionary size of CMP_IMPLODE_DICT_SIZE3
    // Starcraft uses a variable dictionary size based on the algorithm below
    //

    if (cbInBuffer < 0x600)
        dict_size = CMP_IMPLODE_DICT_SIZE1;
    else if(0x600 <= cbInBuffer && cbInBuffer < 0xC00)
        dict_size = CMP_IMPLODE_DICT_SIZE2;
    else
        dict_size = CMP_IMPLODE_DICT_SIZE3;

    // Do the compression
    if(implode(ReadInputData, WriteOutputData, work_buf, &Info, &ctype, &dict_size) == CMP_NO_ERROR)
        *pcbOutBuffer = (int)(Info.pbOutBuff - pbOutBuffer);

    STORM_FREE(work_buf);
}
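
Compress_PKLIB is static to SCompression.cpp, so callers reach it through the public SCompImplode wrapper. A hedged usage sketch follows; the exact pointer types of SCompImplode differ slightly between StormLib versions, and the buffer contents are purely illustrative.

#include <stdio.h>
#include <StormLib.h>

int main(void)
{
    char szInput[]  = "hello hello hello hello - repeated text compresses well";
    char szOutput[128];
    int cbOutput = (int)sizeof(szOutput);

    /* SCompImplode returns nonzero on success and stores the compressed size in cbOutput */
    if(SCompImplode(szOutput, &cbOutput, szInput, (int)sizeof(szInput)))
        printf("imploded %u -> %d bytes\n", (unsigned)sizeof(szInput), cbOutput);
    else
        printf("implode failed\n");
    return 0;
}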
Code Example #5
static int ApplyMpqPatch_COPY(
    TMPQFile * hf,
    TPatchHeader * pPatchHeader)
{
    LPBYTE pbNewFileData;
    DWORD cbNewFileData;

    // Allocate space for new file data
    cbNewFileData = pPatchHeader->dwXfrmBlockSize - SIZE_OF_XFRM_HEADER;
    pbNewFileData = STORM_ALLOC(BYTE, cbNewFileData);
    if(pbNewFileData == NULL)
        return ERROR_NOT_ENOUGH_MEMORY;

    // Copy the patch data as-is
    memcpy(pbNewFileData, (LPBYTE)pPatchHeader + sizeof(TPatchHeader), cbNewFileData);

    // Free the old file data
    STORM_FREE(hf->pbFileData);

    // Put the new file data there
    hf->pbFileData = pbNewFileData;
    hf->cbFileData = cbNewFileData;
    return ERROR_SUCCESS;
}
Code Example #6
static int Decompress_PKLIB(char * pbOutBuffer, int * pcbOutBuffer, char * pbInBuffer, int cbInBuffer)
{
    TDataInfo Info;                             // Data information
    char * work_buf = STORM_ALLOC(char, EXP_BUFFER_SIZE);// Pklib's work buffer

    // Fill data information structure
    memset(work_buf, 0, EXP_BUFFER_SIZE);
    Info.pbInBuff     = pbInBuffer;
    Info.pbInBuffEnd  = pbInBuffer + cbInBuffer;
    Info.pbOutBuff    = pbOutBuffer;
    Info.pbOutBuffEnd = pbOutBuffer + *pcbOutBuffer;

    // Do the decompression
    explode(ReadInputData, WriteOutputData, work_buf, &Info);

    // The work buffer is no longer needed; free it before checking the result
    STORM_FREE(work_buf);

    // If PKLIB was unable to decompress the data, return 0
    if(Info.pbOutBuff == pbOutBuffer)
        return 0;

    // Give away the number of decompressed bytes
    *pcbOutBuffer = (int)(Info.pbOutBuff - pbOutBuffer);
    return 1;
}
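
Decompress_PKLIB is reached the same way through the public SCompExplode wrapper, which Code Example #9 calls for imploded single-unit files. A minimal sketch of that calling pattern; the wrapper name and failure handling mirror Example #9, everything else is illustrative.

static int ExplodeSector_Sketch(char * pbOut, unsigned int dwDataSize, char * pbIn, unsigned int dwCmpSize)
{
    int cbOutBuffer = (int)dwDataSize;      /* prime with the expected decompressed size */

    /* SCompExplode returns nonzero on success and updates cbOutBuffer with the real size */
    if(!SCompExplode(pbOut, &cbOutBuffer, pbIn, (int)dwCmpSize))
        return ERROR_FILE_CORRUPT;
    return ERROR_SUCCESS;
}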
Code Example #7
static int ApplyMpqPatch_BSD0(
    TMPQFile * hf,
    TPatchHeader * pPatchHeader)
{
    PBLIZZARD_BSDIFF40_FILE pBsdiff;
    LPDWORD pCtrlBlock;
    LPBYTE pbPatchData = (LPBYTE)pPatchHeader + sizeof(TPatchHeader);
    LPBYTE pDataBlock;
    LPBYTE pExtraBlock;
    LPBYTE pbNewData = NULL;
    LPBYTE pbOldData = (LPBYTE)hf->pbFileData;
    DWORD dwNewOffset = 0;                          // Current position to patch
    DWORD dwOldOffset = 0;                          // Current source position
    DWORD dwNewSize;                                // Patched file size
    DWORD dwOldSize = hf->cbFileData;               // File size before patch

    // Get pointer to the patch header
    // Format of BSDIFF header corresponds to original BSDIFF, which is:
    // 0000   8 bytes   signature "BSDIFF40"
    // 0008   8 bytes   size of the control block
    // 0010   8 bytes   size of the data block
    // 0018   8 bytes   new size of the patched file
    pBsdiff = (PBLIZZARD_BSDIFF40_FILE)pbPatchData;
    pbPatchData += sizeof(BLIZZARD_BSDIFF40_FILE);

    // Get pointer to the 32-bit BSDIFF control block
    // The control block follows immediately after the BSDIFF header
    // and consists of three 32-bit integers
    // 0000   4 bytes   Length to copy from the BSDIFF data block to the new file
    // 0004   4 bytes   Length to copy from the BSDIFF extra block
    // 0008   4 bytes   Size to increment source file offset
    pCtrlBlock = (LPDWORD)pbPatchData;
    pbPatchData += (size_t)BSWAP_INT64_UNSIGNED(pBsdiff->CtrlBlockSize);

    // Get the pointer to the data block
    pDataBlock = (LPBYTE)pbPatchData;
    pbPatchData += (size_t)BSWAP_INT64_UNSIGNED(pBsdiff->DataBlockSize);

    // Get the pointer to the extra block
    pExtraBlock = (LPBYTE)pbPatchData;
    dwNewSize = (DWORD)BSWAP_INT64_UNSIGNED(pBsdiff->NewFileSize);

    // Allocate new buffer
    pbNewData = STORM_ALLOC(BYTE, dwNewSize);
    if(pbNewData == NULL)
        return ERROR_NOT_ENOUGH_MEMORY;

    // Now patch the file
    while(dwNewOffset < dwNewSize)
    {
        DWORD dwAddDataLength = BSWAP_INT32_UNSIGNED(pCtrlBlock[0]);
        DWORD dwMovDataLength = BSWAP_INT32_UNSIGNED(pCtrlBlock[1]);
        DWORD dwOldMoveLength = BSWAP_INT32_UNSIGNED(pCtrlBlock[2]);
        DWORD i;

        // Sanity check
        if((dwNewOffset + dwAddDataLength) > dwNewSize)
        {
            STORM_FREE(pbNewData);
            return ERROR_FILE_CORRUPT;
        }

        // Read the diff string to the target buffer
        memcpy(pbNewData + dwNewOffset, pDataBlock, dwAddDataLength);
        pDataBlock += dwAddDataLength;

        // Now combine the patch data with the original file
        for(i = 0; i < dwAddDataLength; i++)
        {
            if(dwOldOffset < dwOldSize)
                pbNewData[dwNewOffset] = pbNewData[dwNewOffset] + pbOldData[dwOldOffset];

            dwNewOffset++;
            dwOldOffset++;
        }

        // Sanity check
        if((dwNewOffset + dwMovDataLength) > dwNewSize)
        {
            STORM_FREE(pbNewData);
            return ERROR_FILE_CORRUPT;
        }

        // Copy the data from the extra block in BSDIFF patch
        memcpy(pbNewData + dwNewOffset, pExtraBlock, dwMovDataLength);
        pExtraBlock += dwMovDataLength;
        dwNewOffset += dwMovDataLength;

        // Move the old offset
        if(dwOldMoveLength & 0x80000000)
            dwOldMoveLength = 0x80000000 - dwOldMoveLength;
        dwOldOffset += dwOldMoveLength;
        pCtrlBlock += 3;
    }

    // Free the old file data
    STORM_FREE(hf->pbFileData);

    // Put the new data into the file structure
    hf->pbFileData = pbNewData;
    hf->cbFileData = dwNewSize;
    return ERROR_SUCCESS;
}
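
The patch loop above follows the usual BSDIFF recipe: an "add" block whose bytes are summed with the old file, an "extra" block copied verbatim, and a signed seek applied to the old-file offset. Below is a toy, self-contained illustration of a single control triple; all values are made up.

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char pbOldData[] = "ABCDEF";
    unsigned char pbDiff[]    = { 1, 1, 1, 0, 0, 0 };   /* summed with the old bytes */
    unsigned char pbExtra[]   = "xyz";                  /* copied verbatim */
    unsigned int  pCtrl[3]    = { 6, 3, 0 };            /* add 6 bytes, copy 3 bytes, seek 0 */
    unsigned char pbNewData[16];
    unsigned int dwNewOffset = 0, dwOldOffset = 0, i;

    /* "Add" step: diff bytes plus old-file bytes produce the new bytes */
    memcpy(pbNewData + dwNewOffset, pbDiff, pCtrl[0]);
    for(i = 0; i < pCtrl[0]; i++)
    {
        pbNewData[dwNewOffset] = pbNewData[dwNewOffset] + pbOldData[dwOldOffset];
        dwNewOffset++;
        dwOldOffset++;
    }

    /* "Extra" step: bytes taken as-is from the extra block */
    memcpy(pbNewData + dwNewOffset, pbExtra, pCtrl[1]);
    dwNewOffset += pCtrl[1];

    /* The seek value pCtrl[2] would now adjust dwOldOffset before the next triple */
    pbNewData[dwNewOffset] = '\0';
    printf("%s\n", (char *)pbNewData);                  /* prints "BCDDEFxyz" */
    return 0;
}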
Code Example #8
int EXPORT_SYMBOL SFileCompactArchive(void * hMpq, const char * szListFile, int bReserved)
{
    TFileStream * pTempStream = NULL;
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    uint64_t ByteOffset;
    uint64_t ByteCount;
    uint32_t * pFileKeys = NULL;
    char szTempFile[1024] = "";
    char * szTemp = NULL;
    int nError = ERROR_SUCCESS;

    /* Test the valid parameters */
    if(!IsValidMpqHandle(hMpq))
        nError = ERROR_INVALID_HANDLE;
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_READ_ONLY))
        nError = ERROR_ACCESS_DENIED;

    /* If the MPQ is changed at this moment, we have to flush the archive */
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_CHANGED))
    {
        SFileFlushArchive(hMpq);
    }

    /* Create the table with file keys */
    if(nError == ERROR_SUCCESS)
    {
        if((pFileKeys = STORM_ALLOC(uint32_t, ha->dwFileTableSize)) != NULL)
            memset(pFileKeys, 0, sizeof(uint32_t) * ha->dwFileTableSize);
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    /* First of all, we have to check if we are able to decrypt all files. */
    /* If not, sorry, but the archive cannot be compacted. */
    if(nError == ERROR_SUCCESS)
    {
        /* Initialize the progress variables for compact callback */
        FileStream_GetSize(ha->pStream, &(ha->CompactTotalBytes));
        ha->CompactBytesProcessed = 0;
        nError = CheckIfAllKeysKnown(ha, szListFile, pFileKeys);
    }

    /* Get the temporary file name and create it */
    if(nError == ERROR_SUCCESS)
    {
        strcpy(szTempFile, FileStream_GetFileName(ha->pStream));
        if((szTemp = strrchr(szTempFile, '.')) != NULL)
            strcpy(szTemp + 1, "mp_");
        else
            strcat(szTempFile, "_");

        pTempStream = FileStream_CreateFile(szTempFile, STREAM_PROVIDER_FLAT | BASE_PROVIDER_FILE);
        if(pTempStream == NULL)
            nError = GetLastError();
    }

    /* Write the data before MPQ user data (if any) */
    if(nError == ERROR_SUCCESS && ha->UserDataPos != 0)
    {
        /* Inform the application about the progress */
        if(ha->pfnCompactCB != NULL)
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COPYING_NON_MPQ_DATA, ha->CompactBytesProcessed, ha->CompactTotalBytes);

        ByteOffset = 0;
        ByteCount = ha->UserDataPos;
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, &ByteOffset, ByteCount);
    }

    /* Write the MPQ user data (if any) */
    if(nError == ERROR_SUCCESS && ha->MpqPos > ha->UserDataPos)
    {
        /* At this point, we assume that the user data size is equal */
        /* to pUserData->dwHeaderOffs. */
        /* If this assumption doesn't work, then we have an unknown version of MPQ */
        ByteOffset = ha->UserDataPos;
        ByteCount = ha->MpqPos - ha->UserDataPos;

        assert(ha->pUserData != NULL);
        assert(ha->pUserData->dwHeaderOffs == ByteCount);
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, &ByteOffset, ByteCount);
    }

    /* Write the MPQ header */
    if(nError == ERROR_SUCCESS)
    {
        TMPQHeader SaveMpqHeader;

        /* Write the MPQ header to the file */
        memcpy(&SaveMpqHeader, ha->pHeader, ha->pHeader->dwHeaderSize);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_1);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_2);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_3);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_4);
        if(!FileStream_Write(pTempStream, NULL, &SaveMpqHeader, ha->pHeader->dwHeaderSize))
            nError = GetLastError();

        /* Update the progress */
        ha->CompactBytesProcessed += ha->pHeader->dwHeaderSize;
    }

    /* Now copy all files */
    if(nError == ERROR_SUCCESS)
        nError = CopyMpqFiles(ha, pFileKeys, pTempStream);

    /* If succeeded, switch the streams */
    if(nError == ERROR_SUCCESS)
    {
        ha->dwFlags |= MPQ_FLAG_CHANGED;
        if(FileStream_Replace(ha->pStream, pTempStream))
            pTempStream = NULL;
        else
            nError = ERROR_CAN_NOT_COMPLETE;
    }

    /* Final user notification */
    if(nError == ERROR_SUCCESS && ha->pfnCompactCB != NULL)
    {
        ha->CompactBytesProcessed += (ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
        ha->CompactBytesProcessed += (ha->dwFileTableSize * sizeof(TMPQBlock));
        ha->pfnCompactCB(ha->pvCompactUserData, CCB_CLOSING_ARCHIVE, ha->CompactBytesProcessed, ha->CompactTotalBytes);
    }

    /* Cleanup and return */
    if(pTempStream != NULL)
        FileStream_Close(pTempStream);
    if(pFileKeys != NULL)
        STORM_FREE(pFileKeys);
    if(nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
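
A hedged usage sketch for this C port: open the archive writable, compact it against an external listfile, and close it. SFileOpenArchive and SFileCloseArchive are the usual StormLib entry points; their exact C-port signatures, the flag value 0 for read/write access, and the file names here are assumptions.

#include <stdio.h>

int main(void)
{
    void * hMpq = NULL;

    if(!SFileOpenArchive("war3map.w3x", 0, 0, &hMpq))
    {
        printf("open failed: %u\n", (unsigned)GetLastError());
        return 1;
    }

    if(!SFileCompactArchive(hMpq, "extra_listfile.txt", 0))
        printf("compact failed: %u\n", (unsigned)GetLastError());

    SFileCloseArchive(hMpq);
    return 0;
}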
Code Example #9
static int ReadMpqFileSingleUnit(TMPQFile * hf, void * pvBuffer, DWORD dwFilePos, DWORD dwToRead, LPDWORD pdwBytesRead)
{
    ULONGLONG RawFilePos = hf->RawFilePos;
    TMPQArchive * ha = hf->ha;
    TFileEntry * pFileEntry = hf->pFileEntry;
    LPBYTE pbCompressed = NULL;
    LPBYTE pbRawData = NULL;
    bool bIsReallyCompressed = false;
    int nError = ERROR_SUCCESS;

    // If the file buffer is not allocated yet, do it.
    if(hf->pbFileSector == NULL)
    {
        nError = AllocateSectorBuffer(hf);
        if(nError != ERROR_SUCCESS)
            return nError;
    }

    // The raw data is loaded directly into the file buffer unless the file is compressed
    pbRawData = hf->pbFileSector;

    // If the file is a patch file, adjust raw data offset
    if(hf->pPatchInfo != NULL)
        RawFilePos += hf->pPatchInfo->dwLength;

    // If the file sector is not loaded yet, do it
    if(hf->dwSectorOffs != 0)
    {
        // Is the file compressed?
        if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS)
        {
            // Allocate space for compressed data
            pbCompressed = STORM_ALLOC(BYTE, pFileEntry->dwCmpSize);
            if(pbCompressed == NULL)
                return ERROR_NOT_ENOUGH_MEMORY;
            bIsReallyCompressed = true;
            pbRawData = pbCompressed;
        }

        // Load the raw (compressed, encrypted) data
        if(!FileStream_Read(ha->pStream, &RawFilePos, pbRawData, pFileEntry->dwCmpSize))
        {
            STORM_FREE(pbCompressed);
            return GetLastError();
        }

        // If the file is encrypted, we have to decrypt the data first
        if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
        {
            BSWAP_ARRAY32_UNSIGNED(pbRawData, pFileEntry->dwCmpSize);
            DecryptMpqBlock(pbRawData, pFileEntry->dwCmpSize, hf->dwFileKey);
            BSWAP_ARRAY32_UNSIGNED(pbRawData, pFileEntry->dwCmpSize);
        }

        //
        // In "wow-update-12694.MPQ" from Wow-Cataclysm BETA:
        //
        // File                                    CmpSize FileSize  Data
        // --------------------------------------  ------- --------  ---------------
        // esES\DBFilesClient\LightSkyBox.dbc      0xBE    0xBC      Is compressed
        // deDE\DBFilesClient\MountCapability.dbc  0x93    0x77      Is uncompressed
        //
        // Now tell me how to deal with this mess.
        //

        if(hf->pPatchInfo != NULL)
        {
            if(pbRawData[0] == 'P' && pbRawData[1] == 'T' && pbRawData[2] == 'C' && pbRawData[3] == 'H')
            {
                assert(pFileEntry->dwCmpSize >= hf->dwDataSize);
                bIsReallyCompressed = false;
            }
        }
        else
        {
            if(pFileEntry->dwCmpSize >= hf->dwDataSize)
                bIsReallyCompressed = false;
        }

        // If the file is compressed, we have to decompress it now
        if(bIsReallyCompressed)
        {
            int cbOutBuffer = (int)hf->dwDataSize;

            // Note: Single unit files compressed with IMPLODE are not supported by Blizzard
            if(pFileEntry->dwFlags & MPQ_FILE_IMPLODE)
            {
                if(!SCompExplode((char *)hf->pbFileSector, &cbOutBuffer, (char *)pbRawData, (int)pFileEntry->dwCmpSize))
                    nError = ERROR_FILE_CORRUPT;
            }
            if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS)
            {
                if(!SCompDecompress((char *)hf->pbFileSector, &cbOutBuffer, (char *)pbRawData, (int)pFileEntry->dwCmpSize))
                    nError = ERROR_FILE_CORRUPT;
            }
        }
        else
        {
            if(pbRawData != hf->pbFileSector)
                memcpy(hf->pbFileSector, pbRawData, hf->dwDataSize);
        }

        // Free the decompression buffer.
        if(pbCompressed != NULL)
            STORM_FREE(pbCompressed);

        // The file sector is now properly loaded
        hf->dwSectorOffs = 0;
    }

    // At this moment, we have the file loaded into the file buffer.
    // Copy as much as the caller wants
    if(nError == ERROR_SUCCESS && hf->dwSectorOffs == 0)
    {
        // File position is greater or equal to file size ?
        if(dwFilePos >= hf->dwDataSize)
        {
            *pdwBytesRead = 0;
            return ERROR_SUCCESS;
        }

        // If not enough bytes remaining in the file, cut them
        if((hf->dwDataSize - dwFilePos) < dwToRead)
            dwToRead = (hf->dwDataSize - dwFilePos);

        // Copy the bytes
        memcpy(pvBuffer, hf->pbFileSector + dwFilePos, dwToRead);

        // Give the number of bytes read
        *pdwBytesRead = dwToRead;
        return ERROR_SUCCESS;
    }

    // An error, sorry
    return ERROR_CAN_NOT_COMPLETE;
}
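
For completeness, the caller-side view: files stored with MPQ_FILE_SINGLE_UNIT are routed to ReadMpqFileSingleUnit by SFileReadFile internally, so user code just opens and reads. A hedged sketch against the mainline C++ API, which, unlike the C port in Code Example #1, passes a trailing LPOVERLAPPED to SFileReadFile; SFILE_OPEN_FROM_MPQ is the usual search-scope value.

static bool ReadWholeMpqFile(HANDLE hMpq, const char * szFileName, LPBYTE pbBuffer, DWORD cbBuffer, LPDWORD pdwBytesRead)
{
    HANDLE hFile = NULL;
    bool bResult = false;

    /* Look the file up inside the archive and read as many bytes as the buffer holds */
    if(SFileOpenFileEx(hMpq, szFileName, SFILE_OPEN_FROM_MPQ, &hFile))
    {
        bResult = SFileReadFile(hFile, pbBuffer, cbBuffer, pdwBytesRead, NULL);
        SFileCloseFile(hFile);
    }
    return bResult;
}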
Code Example #10
File: SFileAddFile.cpp Project: 3DViking/MistCore
static int RecryptFileData(
    TMPQArchive * ha,
    TMPQFile * hf,
    const char * szFileName,
    const char * szNewFileName)
{
    ULONGLONG RawFilePos;
    TFileEntry * pFileEntry = hf->pFileEntry;
    DWORD dwBytesToRecrypt = pFileEntry->dwCmpSize;
    DWORD dwOldKey;
    DWORD dwNewKey;
    int nError = ERROR_SUCCESS;

    // The file must be encrypted
    assert(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED);

    // File decryption key is calculated from the plain name
    szNewFileName = GetPlainFileNameA(szNewFileName);
    szFileName = GetPlainFileNameA(szFileName);

    // Calculate both file keys
    dwOldKey = DecryptFileKey(szFileName,    pFileEntry->ByteOffset, pFileEntry->dwFileSize, pFileEntry->dwFlags);
    dwNewKey = DecryptFileKey(szNewFileName, pFileEntry->ByteOffset, pFileEntry->dwFileSize, pFileEntry->dwFlags);

    // If the keys are equal, don't recrypt the file
    if(dwNewKey == dwOldKey)
        return ERROR_SUCCESS;
    hf->dwFileKey = dwOldKey;

    // Calculate the raw position of the file in the archive
    hf->MpqFilePos = pFileEntry->ByteOffset;
    hf->RawFilePos = ha->MpqPos + hf->MpqFilePos;

    // Allocate buffer for file transfer
    nError = AllocateSectorBuffer(hf);
    if(nError != ERROR_SUCCESS)
        return nError;

    // Also allocate buffer for sector offsets
    // Note: Don't load sector checksums, we don't need to recrypt them
    nError = AllocateSectorOffsets(hf, true);
    if(nError != ERROR_SUCCESS)
        return nError;

    // If we have sector offsets, recrypt these as well
    if(hf->SectorOffsets != NULL)
    {
        // Allocate secondary buffer for sectors copy
        DWORD * SectorOffsetsCopy = (DWORD *)STORM_ALLOC(BYTE, hf->SectorOffsets[0]);
        DWORD dwSectorOffsLen = hf->SectorOffsets[0];

        if(SectorOffsetsCopy == NULL)
            return ERROR_NOT_ENOUGH_MEMORY;

        // Recrypt the array of sector offsets
        memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorOffsLen);
        EncryptMpqBlock(SectorOffsetsCopy, dwSectorOffsLen, dwNewKey - 1);
        BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorOffsLen);

        // Write the recrypted array back
        if(!FileStream_Write(ha->pStream, &hf->RawFilePos, SectorOffsetsCopy, dwSectorOffsLen))
            nError = GetLastError();
        STORM_FREE(SectorOffsetsCopy);
    }

    // Now we have to recrypt all file sectors. We do it without
    // recompression, because recompression is not necessary in this case
    if(nError == ERROR_SUCCESS)
    {
        for(DWORD dwSector = 0; dwSector < hf->dwSectorCount; dwSector++)
        {
            DWORD dwRawDataInSector = hf->dwSectorSize;
            DWORD dwRawByteOffset = dwSector * hf->dwSectorSize;

            // Last sector: If there are not enough bytes remaining in the file, cut the raw size
            if(dwRawDataInSector > dwBytesToRecrypt)
                dwRawDataInSector = dwBytesToRecrypt;

            // Fix the raw data length if the file is compressed
            if(hf->SectorOffsets != NULL)
            {
                dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
                dwRawByteOffset = hf->SectorOffsets[dwSector];
            }

            // Calculate the raw file offset of the file sector
            CalculateRawSectorOffset(RawFilePos, hf, dwRawByteOffset);

            // Read the file sector
            if(!FileStream_Read(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // If necessary, re-encrypt the sector
            // Note: Recompression is not necessary here. Unlike encryption, 
            // the compression does not depend on the position of the file in MPQ.
            BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
            DecryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwOldKey + dwSector);
            EncryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwNewKey + dwSector);
            BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);

            // Write the sector back
            if(!FileStream_Write(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // Decrement number of bytes remaining
            dwBytesToRecrypt -= hf->dwSectorSize;
        }
    }

    return nError;
}
Code Example #11
// Copies all file sectors into another archive.
static int CopyMpqFileSectors(
    TMPQArchive * ha,
    TMPQFile * hf,
    TFileStream * pNewStream)
{
    TFileEntry * pFileEntry = hf->pFileEntry;
    ULONGLONG RawFilePos;               // Used for calculating sector offset in the old MPQ archive
    ULONGLONG MpqFilePos;               // MPQ file position in the new archive
    DWORD dwBytesToCopy = pFileEntry->dwCmpSize;
    DWORD dwPatchSize = 0;              // Size of patch header
    DWORD dwFileKey1 = 0;               // File key used for decryption
    DWORD dwFileKey2 = 0;               // File key used for encryption
    DWORD dwCmpSize = 0;                // Compressed file size, including patch header
    int nError = ERROR_SUCCESS;

    // Remember the position in the destination file
    FileStream_GetPos(pNewStream, &MpqFilePos);
    MpqFilePos -= ha->MpqPos;

    // Resolve decryption keys. Note that the file key given 
    // in the TMPQFile structure also includes the key adjustment
    if(nError == ERROR_SUCCESS && (pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED))
    {
        dwFileKey2 = dwFileKey1 = hf->dwFileKey;
        if(pFileEntry->dwFlags & MPQ_FILE_FIX_KEY)
        {
            dwFileKey2 = (dwFileKey1 ^ pFileEntry->dwFileSize) - (DWORD)pFileEntry->ByteOffset;
            dwFileKey2 = (dwFileKey2 + (DWORD)MpqFilePos) ^ pFileEntry->dwFileSize;
        }
    }

    // If we have to save patch header, do it
    if(nError == ERROR_SUCCESS && hf->pPatchInfo != NULL)
    {
        BSWAP_ARRAY32_UNSIGNED(hf->pPatchInfo, sizeof(DWORD) * 3);
        if(!FileStream_Write(pNewStream, NULL, hf->pPatchInfo, hf->pPatchInfo->dwLength))
            nError = GetLastError();

        // Save the size of the patch info
        dwPatchSize = hf->pPatchInfo->dwLength;
    }

    // If we have to save sector offset table, do it.
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL)
    {
        DWORD * SectorOffsetsCopy = STORM_ALLOC(DWORD, hf->SectorOffsets[0] / sizeof(DWORD));
        DWORD dwSectorOffsLen = hf->SectorOffsets[0];

        assert((pFileEntry->dwFlags & MPQ_FILE_SINGLE_UNIT) == 0);
        assert(pFileEntry->dwFlags & MPQ_FILE_COMPRESS_MASK);

        if(SectorOffsetsCopy == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;

        // Encrypt the secondary sector offset table and write it to the target file
        if(nError == ERROR_SUCCESS)
        {
            memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorOffsLen);
            if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
                EncryptMpqBlock(SectorOffsetsCopy, dwSectorOffsLen, dwFileKey2 - 1);

            BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorOffsLen);
            if(!FileStream_Write(pNewStream, NULL, SectorOffsetsCopy, dwSectorOffsLen))
                nError = GetLastError();

            dwBytesToCopy -= dwSectorOffsLen;
            dwCmpSize += dwSectorOffsLen;
        }

        // Update compact progress
        if(ha->pfnCompactCB != NULL)
        {
            ha->CompactBytesProcessed += dwSectorOffsLen;
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
        }

        STORM_FREE(SectorOffsetsCopy);
    }

    // Now we have to copy all file sectors. We do it without
    // recompression, because recompression is not necessary in this case
    if(nError == ERROR_SUCCESS)
    {
        for(DWORD dwSector = 0; dwSector < hf->dwSectorCount; dwSector++)
        {
            DWORD dwRawDataInSector = hf->dwSectorSize;
            DWORD dwRawByteOffset = dwSector * hf->dwSectorSize;

            // Fix the raw data length if the file is compressed
            if(hf->SectorOffsets != NULL)
            {
                dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
                dwRawByteOffset = hf->SectorOffsets[dwSector];
            }

            // Last sector: If there are not enough bytes remaining in the file, cut the raw size
            if(dwRawDataInSector > dwBytesToCopy)
                dwRawDataInSector = dwBytesToCopy;

            // Calculate the raw file offset of the file sector
            CalculateRawSectorOffset(RawFilePos, hf, dwRawByteOffset);
            
            // Read the file sector
            if(!FileStream_Read(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // If necessary, re-encrypt the sector
            // Note: Recompression is not necessary here. Unlike encryption, 
            // the compression does not depend on the position of the file in MPQ.
            if((pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED) && dwFileKey1 != dwFileKey2)
            {
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
                DecryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey1 + dwSector);
                EncryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey2 + dwSector);
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
            }

            // Now write the sector back to the file
            if(!FileStream_Write(pNewStream, NULL, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            // Update compact progress
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwRawDataInSector;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            // Adjust byte counts
            dwBytesToCopy -= dwRawDataInSector;
            dwCmpSize += dwRawDataInSector;
        }
    }

    // Copy the sector CRCs, if any
    // Sector CRCs are always compressed (not imploded) and unencrypted
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL && hf->SectorChksums != NULL)
    {
        DWORD dwCrcLength;

        dwCrcLength = hf->SectorOffsets[hf->dwSectorCount + 1] - hf->SectorOffsets[hf->dwSectorCount];
        if(dwCrcLength != 0)
        {
            if(!FileStream_Read(ha->pStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            // Update compact progress
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwCrcLength;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            // Size of the CRC block is also included in the compressed file size
            dwBytesToCopy -= dwCrcLength;
            dwCmpSize += dwCrcLength;
        }
    }

    // There might be extra data beyond sector checksum table
    // Sometimes, these data are even part of sector offset table
    // Examples:
    // 2012 - WoW\15354\locale-enGB.MPQ:DBFilesClient\SpellLevels.dbc
    // 2012 - WoW\15354\locale-enGB.MPQ:Interface\AddOns\Blizzard_AuctionUI\Blizzard_AuctionUI.xml
    if(nError == ERROR_SUCCESS && dwBytesToCopy != 0)
    {
        LPBYTE pbExtraData;

        // Allocate space for the extra data
        pbExtraData = STORM_ALLOC(BYTE, dwBytesToCopy);
        if(pbExtraData != NULL)
        {
            if(!FileStream_Read(ha->pStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            // Include these extra data in the compressed size
            dwCmpSize += dwBytesToCopy;
            STORM_FREE(pbExtraData);
        }
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Write the MD5's of the raw file data, if needed
    if(nError == ERROR_SUCCESS && ha->pHeader->dwRawChunkSize != 0)
    {
        nError = WriteMpqDataMD5(pNewStream, 
                                 ha->MpqPos + MpqFilePos,
                                 pFileEntry->dwCmpSize,
                                 ha->pHeader->dwRawChunkSize);
    }

    // Update file position in the block table
    if(nError == ERROR_SUCCESS)
    {
        // At this point, the number of bytes written should be exactly
        // the same as the compressed file size. If it isn't,
        // there's something wrong (an unknown archive version, MPQ malformation, ...)
        // 
        // Note: Diablo savegames have very weird layout, and the file "hero"
        // seems to have improper compressed size. Instead of real compressed size,
        // the "dwCmpSize" member of the block table entry contains
        // uncompressed size of file data + size of the sector table.
        // If we compact the archive, Diablo will refuse to load the game
        //
        // Note: Some patch files in WOW patches don't count the patch header
        // into compressed size
        //

        if(dwCmpSize <= pFileEntry->dwCmpSize && pFileEntry->dwCmpSize <= dwCmpSize + dwPatchSize)
        {
            // Note: DO NOT update the compressed size in the file entry, no matter how bad it is.
            pFileEntry->ByteOffset = MpqFilePos;
        }
        else
        {
            nError = ERROR_FILE_CORRUPT;
            assert(false);
        }
    }

    return nError;
}
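
The key re-basing at the top of this function is the MPQ_FILE_FIX_KEY rule: the stored encryption key folds in the file's byte offset, so moving the file during compaction means stripping the old offset out of the key and mixing the new one in. A standalone sketch of just that arithmetic; all values are illustrative.

#include <stdio.h>

int main(void)
{
    unsigned int dwFileSize  = 0x1234;
    unsigned int dwOldOffset = 0x00010000;   /* old position relative to the MPQ start */
    unsigned int dwNewOffset = 0x00020000;   /* new position after compacting */
    unsigned int dwBaseKey   = 0xC1D2E3F4;   /* key hashed from the plain file name */

    /* Forward adjustment applied when the file was originally written */
    unsigned int dwFileKey1 = (dwBaseKey + dwOldOffset) ^ dwFileSize;

    /* Re-base the key for the new location, as CopyMpqFileSectors does */
    unsigned int dwFileKey2 = (dwFileKey1 ^ dwFileSize) - dwOldOffset;
    dwFileKey2 = (dwFileKey2 + dwNewOffset) ^ dwFileSize;

    printf("old key %08X -> new key %08X\n", dwFileKey1, dwFileKey2);
    return 0;
}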
Code Example #12
bool WINAPI SFileSetMaxFileCount(HANDLE hMpq, DWORD dwMaxFileCount)
{
    TMPQHetTable * pOldHetTable = NULL;
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    TFileEntry * pOldFileTableEnd = ha->pFileTable + ha->dwFileTableSize;
    TFileEntry * pOldFileTable = NULL;
    TFileEntry * pOldFileEntry;
    TFileEntry * pFileEntry;
    TMPQHash * pOldHashTable = NULL;
    DWORD dwOldHashTableSize = 0;
    DWORD dwOldFileTableSize = 0;
    int nError = ERROR_SUCCESS;

    // Test the valid parameters
    if(!IsValidMpqHandle(ha))
        nError = ERROR_INVALID_HANDLE;
    if(ha->dwFlags & MPQ_FLAG_READ_ONLY)
        nError = ERROR_ACCESS_DENIED;

    // The new limit must not be lower than the index of the last file entry in the table
    if(nError == ERROR_SUCCESS && ha->dwFileTableSize > dwMaxFileCount)
        nError = ERROR_DISK_FULL;

    // ALL file names must be known in order to be able
    // to rebuild hash table size
    if(nError == ERROR_SUCCESS)
    {
        nError = CheckIfAllFilesKnown(ha, NULL, NULL);
    }

    // If the MPQ has a hash table, then we relocate the hash table
    if(nError == ERROR_SUCCESS && ha->pHashTable != NULL)
    {
        // Save parameters for the current hash table
        dwOldHashTableSize = ha->pHeader->dwHashTableSize;
        pOldHashTable = ha->pHashTable;

        // Allocate new hash table
        ha->pHeader->dwHashTableSize = GetHashTableSizeForFileCount(dwMaxFileCount);
        ha->pHashTable = STORM_ALLOC(TMPQHash, ha->pHeader->dwHashTableSize);
        if(ha->pHashTable != NULL)
            memset(ha->pHashTable, 0xFF, ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // If the MPQ has HET table, allocate new one as well
    if(nError == ERROR_SUCCESS && ha->pHetTable != NULL)
    {
        // Save the original HET table
        pOldHetTable = ha->pHetTable;

        // Create new one
        ha->pHetTable = CreateHetTable(dwMaxFileCount, 0x40, true);
        if(ha->pHetTable == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Now reallocate the file table
    if(nError == ERROR_SUCCESS)
    {
        // Save the current file table
        dwOldFileTableSize = ha->dwFileTableSize;
        pOldFileTable = ha->pFileTable;

        // Create new one
        ha->pFileTable = STORM_ALLOC(TFileEntry, dwMaxFileCount);
        if(ha->pFileTable != NULL)
            memset(ha->pFileTable, 0, dwMaxFileCount * sizeof(TFileEntry));
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Now we have to build both classic hash table and HET table.
    if(nError == ERROR_SUCCESS)
    {
        DWORD dwFileIndex = 0;
        DWORD dwHashIndex = 0;

        // Create new hash and HET entry for each file
        pFileEntry = ha->pFileTable;
        for(pOldFileEntry = pOldFileTable; pOldFileEntry < pOldFileTableEnd; pOldFileEntry++)
        {
            if(pOldFileEntry->dwFlags & MPQ_FILE_EXISTS)
            {
                // Copy the old file entry to the new one
                memcpy(pFileEntry, pOldFileEntry, sizeof(TFileEntry));
                assert(pFileEntry->szFileName != NULL);
                
                // Create new entry in the hash table
                if(ha->pHashTable != NULL)
                {
                    dwHashIndex = AllocateHashEntry(ha, pFileEntry);
                    if(dwHashIndex == HASH_ENTRY_FREE)
                    {
                        nError = ERROR_CAN_NOT_COMPLETE;
                        break;
                    }
                }

                // Create new entry in the HET table, if needed
                if(ha->pHetTable != NULL)
                {
                    dwHashIndex = AllocateHetEntry(ha, pFileEntry);
                    if(dwHashIndex == HASH_ENTRY_FREE)
                    {
                        nError = ERROR_CAN_NOT_COMPLETE;
                        break;
                    }
                }

                // Move to the next file entry in the new table
                pFileEntry++;
                dwFileIndex++;
            }
        }
    }

    // Mark the archive as changed
    // Note: We always have to rebuild the (attributes) file due to file table change
    if(nError == ERROR_SUCCESS)
    {
        ha->dwMaxFileCount = dwMaxFileCount;
        InvalidateInternalFiles(ha);
    }
    else
    {
        // Revert the hash table
        if(ha->pHashTable != NULL && pOldHashTable != NULL)
        {
            STORM_FREE(ha->pHashTable);
            ha->pHeader->dwHashTableSize = dwOldHashTableSize;
            ha->pHashTable = pOldHashTable;
        }

        // Revert the HET table
        if(ha->pHetTable != NULL && pOldHetTable != NULL)
        {
            FreeHetTable(ha->pHetTable);
            ha->pHetTable = pOldHetTable;
        }

        // Revert the file table
        if(pOldFileTable != NULL)
        {
            STORM_FREE(ha->pFileTable);
            ha->pFileTable = pOldFileTable;
        }

        SetLastError(nError);
    }

    // Return the result
    return (nError == ERROR_SUCCESS);
}
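
Typical hedged usage: raise the file limit before adding more files to an archive that is already full. SFileAddFileEx and the flag/compression constants are the standard StormLib ones; the file names and the chosen limit are illustrative.

static bool AddFileWithGrow(HANDLE hMpq, DWORD dwNewLimit)
{
    /* Grow the file table first; this fails if the archive is read-only
       or the new limit is below the current file table size */
    if(!SFileSetMaxFileCount(hMpq, dwNewLimit))
        return false;

    /* Now there is room for the new file */
    return SFileAddFileEx(hMpq, "new_file.dat", "data\\new_file.dat",
                          MPQ_FILE_COMPRESS, MPQ_COMPRESSION_ZLIB, MPQ_COMPRESSION_NEXT_SAME);
}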
Code Example #13
File: SFileListFile.cpp Project: DoGoodS/StormLib
// Saves the whole listfile into the MPQ.
int SListFileSaveToMpq(TMPQArchive * ha)
{
    TFileEntry * pFileTableEnd = ha->pFileTable + ha->dwFileTableSize;
    TFileEntry * pFileEntry;
    TMPQFile * hf = NULL;
    char * szPrevItem;
    char ** SortTable = NULL;
    DWORD dwFileSize = 0;
    size_t nFileNodes = 0;
    size_t i;
    int nError = ERROR_SUCCESS;

    // Allocate the table for sorting listfile
    SortTable = STORM_ALLOC(char*, ha->dwFileTableSize);
    if(SortTable == NULL)
        return ERROR_NOT_ENOUGH_MEMORY;

    // Construct the sort table
    // Note: in MPQs with multiple locale versions of the same file,
    // this code causes adding multiple listfile entries.
    // Since those MPQs were last time used in Starcraft,
    // we leave it as it is.
    for(pFileEntry = ha->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
    {
        // Only take existing items
        if((pFileEntry->dwFlags & MPQ_FILE_EXISTS) && pFileEntry->szFileName != NULL)
        {
            // Ignore pseudo-names
            if(!IsPseudoFileName(pFileEntry->szFileName, NULL) && !IsInternalMpqFileName(pFileEntry->szFileName))
            {
                SortTable[nFileNodes++] = pFileEntry->szFileName;
            }
        }
    }

    // Sort the table
    qsort(SortTable, nFileNodes, sizeof(char *), CompareFileNodes);

    // Now parse the table of file names again - remove duplicates
    // and count file size.
    if(nFileNodes != 0)
    {
        // Count the 0-th item
        dwFileSize += (DWORD)strlen(SortTable[0]) + 2;
        szPrevItem = SortTable[0];
        
        // Count all next items
        for(i = 1; i < nFileNodes; i++)
        {
            // If the item is the same as the last one, skip it
            if(_stricmp(SortTable[i], szPrevItem))
            {
                dwFileSize += (DWORD)strlen(SortTable[i]) + 2;
                szPrevItem = SortTable[i];
            }
        }

        // Determine the flags for (listfile)
        if(ha->dwFileFlags1 == 0)
            ha->dwFileFlags1 = GetDefaultSpecialFileFlags(ha, dwFileSize);

        // Create the listfile in the MPQ
        nError = SFileAddFile_Init(ha, LISTFILE_NAME,
                                       0,
                                       dwFileSize,
                                       LANG_NEUTRAL,
                                       ha->dwFileFlags1 | MPQ_FILE_REPLACEEXISTING,
                                      &hf);
        // Add all file names
        if(nError == ERROR_SUCCESS)
        {
            // Each name is followed by newline ("\x0D\x0A")
            szPrevItem = SortTable[0];
            nError = WriteListFileLine(hf, SortTable[0]);

            // Count all next items
            for(i = 1; i < nFileNodes; i++)
            {
                // If the item is the same as the last one, skip it
                if(_stricmp(SortTable[i], szPrevItem))
                {
                    WriteListFileLine(hf, SortTable[i]);
                    szPrevItem = SortTable[i];
                }
            }
        }
    }
    else
    {
        // Create the listfile in the MPQ
        dwFileSize = (DWORD)strlen(LISTFILE_NAME) + 2;
        nError = SFileAddFile_Init(ha, LISTFILE_NAME,
                                       0,
                                       dwFileSize,
                                       LANG_NEUTRAL,
                                       MPQ_FILE_ENCRYPTED | MPQ_FILE_COMPRESS | MPQ_FILE_REPLACEEXISTING,
                                      &hf);

        // Just add "(listfile)" there
        if(nError == ERROR_SUCCESS)
        {
            WriteListFileLine(hf, LISTFILE_NAME);
        }
    }

    // Finalize the file in the MPQ
    if(hf != NULL)
    {
        SFileAddFile_Finish(hf);
    }
    
    // Free buffers
    if(nError == ERROR_SUCCESS)
        ha->dwFlags &= ~MPQ_FLAG_INV_LISTFILE;
    if(SortTable != NULL)
        STORM_FREE(SortTable);
    return nError;
}
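
The size pass above relies on every unique name contributing strlen(name) + 2 bytes, the 2 being the trailing "\x0D\x0A" written by WriteListFileLine. A standalone sketch of that duplicate-skipping count; _stricmp is the MSVC spelling used in the snippet (strcasecmp elsewhere), and the sample names are made up.

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char * SortTable[] = { "war3map.j", "war3map.j", "war3map.w3e" };   /* already sorted */
    size_t nFileNodes = 3, i;
    unsigned int dwFileSize = (unsigned int)strlen(SortTable[0]) + 2;
    const char * szPrevItem = SortTable[0];

    for(i = 1; i < nFileNodes; i++)
    {
        if(_stricmp(SortTable[i], szPrevItem))      /* skip case-insensitive duplicates */
        {
            dwFileSize += (unsigned int)strlen(SortTable[i]) + 2;
            szPrevItem = SortTable[i];
        }
    }

    printf("listfile size: %u bytes\n", dwFileSize); /* 24 for this sample */
    return 0;
}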
Code Example #14
File: SFileGetFileInfo.cpp Project: BuloZB/StormLib
bool WINAPI SFileGetFileInfo(
    HANDLE hMpqOrFile,
    SFileInfoClass InfoClass,
    void * pvFileInfo,
    DWORD cbFileInfo,
    LPDWORD pcbLengthNeeded)
{
    MPQ_SIGNATURE_INFO SignatureInfo;
    TMPQArchive * ha = NULL;
    TFileEntry * pFileEntry = NULL;
    ULONGLONG Int64Value = 0;
    ULONGLONG ByteOffset = 0;
    TMPQHash * pHash;
    TMPQFile * hf = NULL;
    void * pvSrcFileInfo = NULL;
    DWORD cbSrcFileInfo = 0;
    DWORD dwInt32Value = 0;
    int nInfoType = SFILE_INFO_TYPE_INVALID_HANDLE;
    int nError = ERROR_SUCCESS;

    switch(InfoClass)
    {
        case SFileMpqFileName:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = (void *)FileStream_GetFileName(ha->pStream);
                cbSrcFileInfo = (DWORD)(_tcslen((TCHAR *)pvSrcFileInfo) + 1) * sizeof(TCHAR);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqStreamBitmap:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
                return FileStream_GetBitmap(ha->pStream, pvFileInfo, cbFileInfo, pcbLengthNeeded);
            break;

        case SFileMpqUserDataOffset:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                if(ha->pUserData != NULL)
                {
                    pvSrcFileInfo = &ha->UserDataPos;
                    cbSrcFileInfo = sizeof(ULONGLONG);
                    nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
                }
            }
            break;

        case SFileMpqUserDataHeader:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                if(ha->pUserData != NULL)
                {
                    ByteOffset = ha->UserDataPos;
                    cbSrcFileInfo = sizeof(TMPQUserData);
                    nInfoType = SFILE_INFO_TYPE_READ_FROM_FILE;
                }
            }
            break;

        case SFileMpqUserData:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                if(ha->pUserData != NULL)
                {
                    ByteOffset = ha->UserDataPos + sizeof(TMPQUserData);
                    cbSrcFileInfo = ha->pUserData->dwHeaderOffs - sizeof(TMPQUserData);
                    nInfoType = SFILE_INFO_TYPE_READ_FROM_FILE;
                }
            }
            break;

        case SFileMpqHeaderOffset:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->MpqPos;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqHeaderSize:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->dwHeaderSize;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqHeader:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                ByteOffset = ha->MpqPos;
                cbSrcFileInfo = ha->pHeader->dwHeaderSize;
                nInfoType = SFILE_INFO_TYPE_READ_FROM_FILE;
            }
            break;

        case SFileMpqHetTableOffset:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->HetTablePos64;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqHetTableSize:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->HetTableSize64;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqHetHeader:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                pvSrcFileInfo = LoadExtTable(ha, ha->pHeader->HetTablePos64, (size_t)ha->pHeader->HetTableSize64, HET_TABLE_SIGNATURE, MPQ_KEY_HASH_TABLE);
                if(pvSrcFileInfo != NULL)
                {
                    cbSrcFileInfo = sizeof(TMPQHetHeader);
                    nInfoType = SFILE_INFO_TYPE_ALLOCATED;
                }
            }
            break;

        case SFileMpqHetTable:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                pvSrcFileInfo = LoadHetTable(ha);
                if(pvSrcFileInfo != NULL)
                {
                    cbSrcFileInfo = sizeof(void *);
                    nInfoType = SFILE_INFO_TYPE_TABLE_POINTER;
                }
            }
            break;

        case SFileMpqBetTableOffset:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->BetTablePos64;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqBetTableSize:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->BetTableSize64;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqBetHeader:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                pvSrcFileInfo = LoadExtTable(ha, ha->pHeader->BetTablePos64, (size_t)ha->pHeader->BetTableSize64, BET_TABLE_SIGNATURE, MPQ_KEY_BLOCK_TABLE);
                if(pvSrcFileInfo != NULL)
                {
                    // It is allowed for the caller to only require BET header.
                    cbSrcFileInfo = sizeof(TMPQBetHeader) + ((TMPQBetHeader *)pvSrcFileInfo)->dwFlagCount * sizeof(DWORD);
                    if(cbFileInfo == sizeof(TMPQBetHeader))
                        cbSrcFileInfo = sizeof(TMPQBetHeader);
                    nInfoType = SFILE_INFO_TYPE_ALLOCATED;
                }
            }
            break;

        case SFileMpqBetTable:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                pvSrcFileInfo = LoadBetTable(ha);
                if(pvSrcFileInfo != NULL)
                {
                    cbSrcFileInfo = sizeof(void *);
                    nInfoType = SFILE_INFO_TYPE_TABLE_POINTER;
                }
            }
            break;

        case SFileMpqHashTableOffset:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                Int64Value = MAKE_OFFSET64(ha->pHeader->wHashTablePosHi, ha->pHeader->dwHashTablePos);
                pvSrcFileInfo = &Int64Value;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqHashTableSize64:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->HashTableSize64;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqHashTableSize:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->dwHashTableSize;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqHashTable:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL && ha->pHashTable != NULL)
            {
                pvSrcFileInfo = ha->pHashTable;
                cbSrcFileInfo = ha->dwHashTableSize * sizeof(TMPQHash);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqBlockTableOffset:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                Int64Value = MAKE_OFFSET64(ha->pHeader->wBlockTablePosHi, ha->pHeader->dwBlockTablePos);
                pvSrcFileInfo = &Int64Value;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqBlockTableSize64:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->BlockTableSize64;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqBlockTableSize:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->dwBlockTableSize;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqBlockTable:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                if(MAKE_OFFSET64(ha->pHeader->wBlockTablePosHi, ha->pHeader->dwBlockTablePos) != 0)
                {
                    cbSrcFileInfo = ha->pHeader->dwBlockTableSize * sizeof(TMPQBlock);
                    if(cbFileInfo >= cbSrcFileInfo)
                        pvSrcFileInfo = LoadBlockTable(ha, true);
                    nInfoType = SFILE_INFO_TYPE_ALLOCATED;
                }
            }
            break;

        case SFileMpqHiBlockTableOffset:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->HiBlockTablePos64;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqHiBlockTableSize64:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->HiBlockTableSize64;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqHiBlockTable:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                if(ha->pHeader->HiBlockTablePos64 && ha->pHeader->HiBlockTableSize64)
                {
                    assert(false);
                }
            }
            break;

        case SFileMpqSignatures:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL && QueryMpqSignatureInfo(ha, &SignatureInfo))
            {
                pvSrcFileInfo = &SignatureInfo.SignatureTypes;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqStrongSignatureOffset:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                if(QueryMpqSignatureInfo(ha, &SignatureInfo) && (SignatureInfo.SignatureTypes & SIGNATURE_TYPE_STRONG))
                {
                    pvSrcFileInfo = &SignatureInfo.EndMpqData;
                    cbSrcFileInfo = sizeof(ULONGLONG);
                    nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
                }
            }
            break;

        case SFileMpqStrongSignatureSize:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                if(QueryMpqSignatureInfo(ha, &SignatureInfo) && (SignatureInfo.SignatureTypes & SIGNATURE_TYPE_STRONG))
                {
                    dwInt32Value = MPQ_STRONG_SIGNATURE_SIZE + 4;
                    pvSrcFileInfo = &dwInt32Value;
                    cbSrcFileInfo = sizeof(DWORD);
                    nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
                }
            }
            break;

        case SFileMpqStrongSignature:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                if(QueryMpqSignatureInfo(ha, &SignatureInfo) && (SignatureInfo.SignatureTypes & SIGNATURE_TYPE_STRONG))
                {
                    pvSrcFileInfo = SignatureInfo.Signature;
                    cbSrcFileInfo = MPQ_STRONG_SIGNATURE_SIZE + 4;
                    nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
                }
            }
            break;

        case SFileMpqArchiveSize64:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->ArchiveSize64;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqArchiveSize:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->pHeader->dwArchiveSize;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqMaxFileCount:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->dwMaxFileCount;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqFileTableSize:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->dwFileTableSize;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqSectorSize:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &ha->dwSectorSize;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqNumberOfFiles:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                pvSrcFileInfo = &dwInt32Value;
                cbSrcFileInfo = sizeof(DWORD);
                dwInt32Value = GetMpqFileCount(ha);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqRawChunkSize:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                nInfoType = SFILE_INFO_TYPE_NOT_FOUND;
                if(ha->pHeader->dwRawChunkSize != 0)
                {
                    pvSrcFileInfo = &ha->pHeader->dwRawChunkSize;
                    cbSrcFileInfo = sizeof(DWORD);
                    nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
                }
            }
            break;

        case SFileMpqStreamFlags:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                FileStream_GetFlags(ha->pStream, &dwInt32Value);
                pvSrcFileInfo = &dwInt32Value;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileMpqFlags:
            ha = IsValidMpqHandle(hMpqOrFile);
            if(ha != NULL)
            {
                dwInt32Value  = ha->dwFlags;
                pvSrcFileInfo = &dwInt32Value;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoPatchChain:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL)
                return GetFilePatchChain(hf, pvFileInfo, cbFileInfo, pcbLengthNeeded);
            break;

        case SFileInfoFileEntry:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->pFileEntry != NULL)
            {
                pvSrcFileInfo = pFileEntry = hf->pFileEntry;
                cbSrcFileInfo = sizeof(TFileEntry);
                if(pFileEntry->szFileName != NULL)
                    cbSrcFileInfo += (DWORD)strlen(pFileEntry->szFileName) + 1;
                nInfoType = SFILE_INFO_TYPE_FILE_ENTRY;
            }
            break;

        case SFileInfoHashEntry:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->ha != NULL && hf->ha->pHashTable != NULL)
            {
                pvSrcFileInfo = hf->ha->pHashTable + hf->pFileEntry->dwHashIndex;
                cbSrcFileInfo = sizeof(TMPQHash);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoHashIndex:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->pFileEntry != NULL)
            {
                pvSrcFileInfo = &hf->pFileEntry->dwHashIndex;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoNameHash1:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->ha != NULL && hf->ha->pHashTable != NULL)
            {
                pHash = hf->ha->pHashTable + hf->pFileEntry->dwHashIndex;
                pvSrcFileInfo = &pHash->dwName1;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoNameHash2:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->ha != NULL && hf->ha->pHashTable != NULL)
            {
                pHash = hf->ha->pHashTable + hf->pFileEntry->dwHashIndex;
                pvSrcFileInfo = &pHash->dwName2;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoNameHash3:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->pFileEntry != NULL)
            {
                pvSrcFileInfo = &hf->pFileEntry->FileNameHash;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoLocale:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->pFileEntry != NULL)
            {
                dwInt32Value = hf->pFileEntry->lcLocale;
                pvSrcFileInfo = &dwInt32Value;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoFileIndex:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->ha != NULL && hf->pFileEntry != NULL)
            {
                dwInt32Value = (DWORD)(hf->pFileEntry - hf->ha->pFileTable);
                pvSrcFileInfo = &dwInt32Value;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoByteOffset:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->pFileEntry != NULL)
            {
                pvSrcFileInfo = &hf->pFileEntry->ByteOffset;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoFileTime:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->pFileEntry != NULL)
            {
                pvSrcFileInfo = &hf->pFileEntry->FileTime;
                cbSrcFileInfo = sizeof(ULONGLONG);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoFileSize:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->pFileEntry != NULL)
            {
                pvSrcFileInfo = &hf->pFileEntry->dwFileSize;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoCompressedSize:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->pFileEntry != NULL)
            {
                pvSrcFileInfo = &hf->pFileEntry->dwCmpSize;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoFlags:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->pFileEntry != NULL)
            {
                pvSrcFileInfo = &hf->pFileEntry->dwFlags;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoEncryptionKey:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL)
            {
                pvSrcFileInfo = &hf->dwFileKey;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        case SFileInfoEncryptionKeyRaw:
            hf = IsValidFileHandle(hMpqOrFile);
            if(hf != NULL && hf->pFileEntry != NULL)
            {
                dwInt32Value = hf->dwFileKey;
                if(hf->pFileEntry->dwFlags & MPQ_FILE_FIX_KEY)
                    dwInt32Value = (dwInt32Value ^ hf->pFileEntry->dwFileSize) - (DWORD)hf->MpqFilePos;
                pvSrcFileInfo = &dwInt32Value;
                cbSrcFileInfo = sizeof(DWORD);
                nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
            }
            break;

        default:    // Invalid info class
            SetLastError(ERROR_INVALID_PARAMETER);
            return false;
    }

    // If we validated the handle and info class, give as much info as possible
    if(nInfoType >= SFILE_INFO_TYPE_DIRECT_POINTER)
    {
        // Give the length needed, if wanted
        if(pcbLengthNeeded != NULL)
            pcbLengthNeeded[0] = cbSrcFileInfo;

        // If the caller provided an output buffer, its size must also be provided
        if(pvFileInfo != NULL && cbFileInfo != 0)
        {
            // Check if there is enough space in the output buffer
            if(cbSrcFileInfo <= cbFileInfo)
            {
                switch(nInfoType)
                {
                    case SFILE_INFO_TYPE_DIRECT_POINTER:
                    case SFILE_INFO_TYPE_ALLOCATED:
                        assert(pvSrcFileInfo != NULL);
                        memcpy(pvFileInfo, pvSrcFileInfo, cbSrcFileInfo);
                        break;

                    case SFILE_INFO_TYPE_READ_FROM_FILE:
                        if(!FileStream_Read(ha->pStream, &ByteOffset, pvFileInfo, cbSrcFileInfo))
                            nError = GetLastError();
                        break;

                    case SFILE_INFO_TYPE_TABLE_POINTER:
                        assert(pvSrcFileInfo != NULL);
                        *(void **)pvFileInfo = pvSrcFileInfo;
                        pvSrcFileInfo = NULL;
                        break;

                    case SFILE_INFO_TYPE_FILE_ENTRY:
                        assert(pFileEntry != NULL);
                        ConvertFileEntryToSelfRelative((TFileEntry *)pvFileInfo, pFileEntry);
                        break;
                }
            }
            else
            {
                nError = ERROR_INSUFFICIENT_BUFFER;
            }
        }

        // Free the file info if needed
        if(nInfoType == SFILE_INFO_TYPE_ALLOCATED && pvSrcFileInfo != NULL)
            STORM_FREE(pvSrcFileInfo);
        if(nInfoType == SFILE_INFO_TYPE_TABLE_POINTER && pvSrcFileInfo != NULL)
            SFileFreeFileInfo(pvSrcFileInfo, InfoClass);
    }
    else
    {
        // Handle error cases
        if(nInfoType == SFILE_INFO_TYPE_INVALID_HANDLE)
            nError = ERROR_INVALID_HANDLE;
        if(nInfoType == SFILE_INFO_TYPE_NOT_FOUND)
            nError = ERROR_FILE_NOT_FOUND;
    }

    // Set the last error value, if needed
    if(nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
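The switch above only selects a source pointer (pvSrcFileInfo) and a length (cbSrcFileInfo); the tail of the function then copies the data out, reports the length needed, and maps missing data to ERROR_FILE_NOT_FOUND or ERROR_INSUFFICIENT_BUFFER. A minimal caller-side sketch, assuming the stock StormLib public header name <StormLib.h> and the SFileGetFileInfo(hMpqOrFile, InfoClass, pvFileInfo, cbFileInfo, pcbLengthNeeded) entry point whose body is shown here:

#include <stdio.h>
#include <StormLib.h>     // assumed public header name

// Sketch: print the 64-bit archive size of an already opened MPQ handle.
static void PrintArchiveSize(HANDLE hMpq)
{
    ULONGLONG ArchiveSize = 0;
    DWORD cbLengthNeeded = 0;

    if(SFileGetFileInfo(hMpq, SFileMpqArchiveSize64, &ArchiveSize, sizeof(ArchiveSize), &cbLengthNeeded))
        printf("Archive size: %llu bytes\n", (unsigned long long)ArchiveSize);
    else if(GetLastError() == ERROR_INSUFFICIENT_BUFFER)
        printf("Buffer too small, %u bytes needed\n", (unsigned)cbLengthNeeded);
    else
        printf("SFileGetFileInfo failed, error %u\n", (unsigned)GetLastError());
}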
Code example #15
bool WINAPI SFileAddFileEx(
    HANDLE hMpq,
    const TCHAR * szFileName,
    const char * szArchivedName,
    DWORD dwFlags,
    DWORD dwCompression,            // Compression of the first sector
    DWORD dwCompressionNext)        // Compression of next sectors
{
    ULONGLONG FileSize = 0;
    ULONGLONG FileTime = 0;
    TFileStream * pStream = NULL;
    HANDLE hMpqFile = NULL;
    LPBYTE pbFileData = NULL;
    DWORD dwBytesRemaining = 0;
    DWORD dwBytesToRead;
    DWORD dwSectorSize = 0x1000;
    DWORD dwChannels = 0;
    bool bIsAdpcmCompression = false;
    bool bIsFirstSector = true;
    int nError = ERROR_SUCCESS;

    // Check parameters
    if(hMpq == NULL || szFileName == NULL || *szFileName == 0)
    {
        SetLastError(ERROR_INVALID_PARAMETER);
        return false;
    }

    // Open added file
    pStream = FileStream_OpenFile(szFileName, STREAM_FLAG_READ_ONLY | STREAM_PROVIDER_FLAT | BASE_PROVIDER_FILE);
    if(pStream == NULL)
        return false;

    // Files bigger than 4GB cannot be added to MPQ
    FileStream_GetTime(pStream, &FileTime);
    FileStream_GetSize(pStream, &FileSize);
    if(FileSize >> 32)
        nError = ERROR_DISK_FULL;

    // Allocate data buffer for reading from the source file
    if(nError == ERROR_SUCCESS)
    {
        dwBytesRemaining = (DWORD)FileSize;
        pbFileData = STORM_ALLOC(BYTE, dwSectorSize);
        if(pbFileData == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Deal with the various combinations of compression
    if(nError == ERROR_SUCCESS)
    {
        // When the compression for next blocks is set to default,
        // we will copy the compression for the first sector
        if(dwCompressionNext == MPQ_COMPRESSION_NEXT_SAME)
            dwCompressionNext = dwCompression;
        
        // If the caller wants ADPCM compression, we make sure
        // that the first sector is not compressed with lossy compression
        if(dwCompressionNext & (MPQ_COMPRESSION_ADPCM_MONO | MPQ_COMPRESSION_ADPCM_STEREO))
        {
            // The compression of the first file sector must not be ADPCM
            // in order not to corrupt the headers
            if(dwCompression & (MPQ_COMPRESSION_ADPCM_MONO | MPQ_COMPRESSION_ADPCM_STEREO))
                dwCompression = MPQ_COMPRESSION_PKWARE;
            
            // Remove both the mono and stereo flags.
            // They will be re-added according to the WAVE type
            dwCompressionNext &= ~(MPQ_COMPRESSION_ADPCM_MONO | MPQ_COMPRESSION_ADPCM_STEREO);
            bIsAdpcmCompression = true;
        }

        // Initiate adding file to the MPQ
        if(!SFileCreateFile(hMpq, szArchivedName, FileTime, (DWORD)FileSize, lcFileLocale, dwFlags, &hMpqFile))
            nError = GetLastError();
    }

    // Write the file data to the MPQ
    while(nError == ERROR_SUCCESS && dwBytesRemaining != 0)
    {
        // Get the number of bytes remaining in the source file
        dwBytesToRead = dwBytesRemaining;
        if(dwBytesToRead > dwSectorSize)
            dwBytesToRead = dwSectorSize;

        // Read data from the local file
        if(!FileStream_Read(pStream, NULL, pbFileData, dwBytesToRead))
        {
            nError = GetLastError();
            break;
        }

        // If the file being added is a WAVE file, we check number of channels
        if(bIsFirstSector && bIsAdpcmCompression)
        {
            // The file must really be a WAVE file with at least 16 bits per sample,
            // otherwise the ADPCM compression will corrupt it
            if(IsWaveFile_16BitsPerAdpcmSample(pbFileData, dwBytesToRead, &dwChannels))
            {
                // Setup the compression of next sectors according to number of channels
                dwCompressionNext |= (dwChannels == 1) ? MPQ_COMPRESSION_ADPCM_MONO : MPQ_COMPRESSION_ADPCM_STEREO;
            }
            else
            {
                // Setup the compression of next sectors to a lossless compression
                dwCompressionNext = (dwCompression & MPQ_LOSSY_COMPRESSION_MASK) ? MPQ_COMPRESSION_PKWARE : dwCompression;
            }

            bIsFirstSector = false;
        }

        // Add the file sectors to the MPQ
        if(!SFileWriteFile(hMpqFile, pbFileData, dwBytesToRead, dwCompression))
        {
            nError = GetLastError();
            break;
        }

        // Set the next data compression
        dwBytesRemaining -= dwBytesToRead;
        dwCompression = dwCompressionNext;
    }

    // Finish the file writing
    if(hMpqFile != NULL)
    {
        if(!SFileFinishFile(hMpqFile))
            nError = GetLastError();
    }

    // Cleanup and exit
    if(pbFileData != NULL)
        STORM_FREE(pbFileData);
    if(pStream != NULL)
        FileStream_Close(pStream);
    if(nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
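SFileAddFileEx streams the local file into the archive in 4 KB chunks and adjusts the compression of the second and later sectors (the MPQ_COMPRESSION_NEXT_SAME and ADPCM handling above). A hedged caller sketch; the local path is hypothetical, and MPQ_COMPRESSION_ZLIB plus the MPQ_FILE_* flags are assumed to keep their usual StormLib values:

#include <stdio.h>
#include <StormLib.h>     // assumed public header name

// Sketch: add a local file into an archive that was opened writable.
static bool AddLocalFile(HANDLE hMpq)
{
    if(!SFileAddFileEx(hMpq,
                       _T("C:\\data\\readme.txt"),              // hypothetical local path
                       "readme.txt",                             // name stored inside the MPQ
                       MPQ_FILE_COMPRESS | MPQ_FILE_ENCRYPTED,   // stored compressed and encrypted
                       MPQ_COMPRESSION_ZLIB,                     // first sector
                       MPQ_COMPRESSION_NEXT_SAME))               // later sectors: same method
    {
        printf("SFileAddFileEx failed, error %u\n", (unsigned)GetLastError());
        return false;
    }
    return true;
}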
Code example #16
static int ReadMpqFileSingleUnit(TMPQFile * hf, void * pvBuffer, DWORD dwFilePos, DWORD dwToRead, LPDWORD pdwBytesRead)
{
    ULONGLONG RawFilePos = hf->RawFilePos;
    TMPQArchive * ha = hf->ha;
    TFileEntry * pFileEntry = hf->pFileEntry;
    LPBYTE pbCompressed = NULL;
    LPBYTE pbRawData;
    int nError = ERROR_SUCCESS;

    // If the file buffer is not allocated yet, do it.
    if(hf->pbFileSector == NULL)
    {
        nError = AllocateSectorBuffer(hf);
        if(nError != ERROR_SUCCESS || hf->pbFileSector == NULL)
            return nError;
    }

    // If the file is a patch file, adjust raw data offset
    if(hf->pPatchInfo != NULL)
        RawFilePos += hf->pPatchInfo->dwLength;
    pbRawData = hf->pbFileSector;

    // If the file sector is not loaded yet, do it
    if(hf->dwSectorOffs != 0)
    {
        // Is the file compressed?
        if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS_MASK)
        {
            // Allocate space for compressed data
            pbCompressed = STORM_ALLOC(BYTE, pFileEntry->dwCmpSize);
            if(pbCompressed == NULL)
                return ERROR_NOT_ENOUGH_MEMORY;
            pbRawData = pbCompressed;
        }
        
        // Load the raw (compressed, encrypted) data
        if(!FileStream_Read(ha->pStream, &RawFilePos, pbRawData, pFileEntry->dwCmpSize))
        {
            STORM_FREE(pbCompressed);
            return GetLastError();
        }

        // If the file is encrypted, we have to decrypt the data first
        if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
        {
            BSWAP_ARRAY32_UNSIGNED(pbRawData, pFileEntry->dwCmpSize);
            DecryptMpqBlock(pbRawData, pFileEntry->dwCmpSize, hf->dwFileKey);
            BSWAP_ARRAY32_UNSIGNED(pbRawData, pFileEntry->dwCmpSize);
        }

        // If the file is compressed, we have to decompress it now
        if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS_MASK)
        {
            int cbOutBuffer = (int)hf->dwDataSize;
            int cbInBuffer = (int)pFileEntry->dwCmpSize;
            int nResult = 0;

            //
            // If the file is an incremental patch, the size of compressed data
            // is determined as pFileEntry->dwCmpSize - sizeof(TPatchInfo)
            //
            // In "wow-update-12694.MPQ" from Wow-Cataclysm BETA:
            //
            // File                                    CmprSize   DcmpSize DataSize Compressed?
            // --------------------------------------  ---------- -------- -------- ---------------
            // esES\DBFilesClient\LightSkyBox.dbc      0xBE->0xA2  0xBC     0xBC     Yes
            // deDE\DBFilesClient\MountCapability.dbc  0x93->0x77  0x77     0x77     No
            // 

            if(pFileEntry->dwFlags & MPQ_FILE_PATCH_FILE)
                cbInBuffer = cbInBuffer - sizeof(TPatchInfo);

            // Is the file compressed by Blizzard's multiple compression ?
            if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS)
            {
                // Remember the last used compression
                hf->dwCompression0 = pbRawData[0];

                // Decompress the file
                if(ha->pHeader->wFormatVersion >= MPQ_FORMAT_VERSION_2)
                    nResult = SCompDecompress2(hf->pbFileSector, &cbOutBuffer, pbRawData, cbInBuffer);
                else
                    nResult = SCompDecompress(hf->pbFileSector, &cbOutBuffer, pbRawData, cbInBuffer);
            }

            // Is the file compressed by PKWARE Data Compression Library ?
            // Note: Single unit files compressed with IMPLODE are not supported by Blizzard
            else if(pFileEntry->dwFlags & MPQ_FILE_IMPLODE)
                nResult = SCompExplode(hf->pbFileSector, &cbOutBuffer, pbRawData, cbInBuffer);

            nError = (nResult != 0) ? ERROR_SUCCESS : ERROR_FILE_CORRUPT;
        }
        else
        {
            if(hf->pbFileSector != NULL && pbRawData != hf->pbFileSector)
                memcpy(hf->pbFileSector, pbRawData, hf->dwDataSize);
        }

        // Free the decompression buffer.
        if(pbCompressed != NULL)
            STORM_FREE(pbCompressed);

        // The file sector is now properly loaded
        hf->dwSectorOffs = 0;
    }

    // At this moment, we have the file loaded into the file buffer.
    // Copy as much as the caller wants
    if(nError == ERROR_SUCCESS && hf->dwSectorOffs == 0)
    {
        // File position is greater or equal to file size ?
        if(dwFilePos >= hf->dwDataSize)
        {
            *pdwBytesRead = 0;
            return ERROR_SUCCESS;
        }

        // If not enough bytes remaining in the file, cut them
        if((hf->dwDataSize - dwFilePos) < dwToRead)
            dwToRead = (hf->dwDataSize - dwFilePos);

        // Copy the bytes
        memcpy(pvBuffer, hf->pbFileSector + dwFilePos, dwToRead);

        // Give the number of bytes read
        *pdwBytesRead = dwToRead;
        return ERROR_SUCCESS;
    }

    // An error, sorry
    return ERROR_CAN_NOT_COMPLETE;
}
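ReadMpqFileSingleUnit is the internal path taken when a file was stored with the MPQ_FILE_SINGLE_UNIT flag, i.e. as one compressed block rather than a sector chain. From the caller's point of view the storage mode is invisible; a hedged read sketch, assuming the stock StormLib entry points (SFileOpenFileEx, the five-argument SFileReadFile with a NULL overlapped pointer, SFileCloseFile); some forks drop the trailing parameter:

#include <stdio.h>
#include <StormLib.h>     // assumed public header name

// Sketch: read the first bytes of an archived file, whatever its storage mode.
static bool DumpFirstBytes(HANDLE hMpq, const char * szFileName)
{
    HANDLE hFile = NULL;
    BYTE   Buffer[0x100];
    DWORD  dwBytesRead = 0;

    if(!SFileOpenFileEx(hMpq, szFileName, SFILE_OPEN_FROM_MPQ, &hFile))
        return false;

    // For single-unit files this call ends up in ReadMpqFileSingleUnit()
    SFileReadFile(hFile, Buffer, sizeof(Buffer), &dwBytesRead, NULL);
    printf("%s: read %u bytes\n", szFileName, (unsigned)dwBytesRead);

    SFileCloseFile(hFile);
    return (dwBytesRead != 0);
}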
Code example #17
static int ReadMpkFileSingleUnit(TMPQFile * hf, void * pvBuffer, DWORD dwFilePos, DWORD dwToRead, LPDWORD pdwBytesRead)
{
    ULONGLONG RawFilePos = hf->RawFilePos + 0x0C;   // For some reason, MPK files start at position (hf->RawFilePos + 0x0C)
    TMPQArchive * ha = hf->ha;
    TFileEntry * pFileEntry = hf->pFileEntry;
    LPBYTE pbCompressed = NULL;
    LPBYTE pbRawData = hf->pbFileSector;
    int nError = ERROR_SUCCESS;

    // We do not support patch files in MPK archives
    assert(hf->pPatchInfo == NULL);

    // If the file buffer is not allocated yet, do it.
    if(hf->pbFileSector == NULL)
    {
        nError = AllocateSectorBuffer(hf);
        if(nError != ERROR_SUCCESS || hf->pbFileSector == NULL)
            return nError;
        pbRawData = hf->pbFileSector;
    }

    // If the file sector is not loaded yet, do it
    if(hf->dwSectorOffs != 0)
    {
        // Is the file compressed?
        if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS_MASK)
        {
            // Allocate space for compressed data
            pbCompressed = STORM_ALLOC(BYTE, pFileEntry->dwCmpSize);
            if(pbCompressed == NULL)
                return ERROR_NOT_ENOUGH_MEMORY;
            pbRawData = pbCompressed;
        }
        
        // Load the raw (compressed, encrypted) data
        if(!FileStream_Read(ha->pStream, &RawFilePos, pbRawData, pFileEntry->dwCmpSize))
        {
            STORM_FREE(pbCompressed);
            return GetLastError();
        }

        // If the file is encrypted, we have to decrypt the data first
        if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
        {
            DecryptMpkTable(pbRawData, pFileEntry->dwCmpSize);
        }

        // If the file is compressed, we have to decompress it now
        if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS_MASK)
        {
            int cbOutBuffer = (int)hf->dwDataSize;

            hf->dwCompression0 = pbRawData[0];
            if(!SCompDecompressMpk(hf->pbFileSector, &cbOutBuffer, pbRawData, (int)pFileEntry->dwCmpSize))
                nError = ERROR_FILE_CORRUPT;
        }
        else
        {
            if(pbRawData != hf->pbFileSector)
                memcpy(hf->pbFileSector, pbRawData, hf->dwDataSize);
        }

        // Free the decompression buffer.
        if(pbCompressed != NULL)
            STORM_FREE(pbCompressed);

        // The file sector is now properly loaded
        hf->dwSectorOffs = 0;
    }

    // At this moment, we have the file loaded into the file buffer.
    // Copy as much as the caller wants
    if(nError == ERROR_SUCCESS && hf->dwSectorOffs == 0)
    {
        // File position is greater or equal to file size ?
        if(dwFilePos >= hf->dwDataSize)
        {
            *pdwBytesRead = 0;
            return ERROR_SUCCESS;
        }

        // If not enough bytes remaining in the file, cut them
        if((hf->dwDataSize - dwFilePos) < dwToRead)
            dwToRead = (hf->dwDataSize - dwFilePos);

        // Copy the bytes
        memcpy(pvBuffer, hf->pbFileSector + dwFilePos, dwToRead);

        // Give the number of bytes read
        *pdwBytesRead = dwToRead;
        return ERROR_SUCCESS;
    }

    // An error, sorry
    return ERROR_CAN_NOT_COMPLETE;
}
Code example #18
bool WINAPI SFileCompactArchive(HANDLE hMpq, const char * szListFile, bool /* bReserved */)
{
    TFileStream * pTempStream = NULL;
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    ULONGLONG ByteOffset;
    ULONGLONG ByteCount;
    LPDWORD pFileKeys = NULL;
    TCHAR szTempFile[MAX_PATH] = _T("");
    TCHAR * szTemp = NULL;
    int nError = ERROR_SUCCESS;

    // Test the valid parameters
    if(!IsValidMpqHandle(hMpq))
        nError = ERROR_INVALID_HANDLE;
    if(ha->dwFlags & MPQ_FLAG_READ_ONLY)
        nError = ERROR_ACCESS_DENIED;

    // If the MPQ is changed at this moment, we have to flush the archive
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_CHANGED))
    {
        SFileFlushArchive(hMpq);
    }

    // Create the table with file keys
    if(nError == ERROR_SUCCESS)
    {
        if((pFileKeys = STORM_ALLOC(DWORD, ha->dwFileTableSize)) != NULL)
            memset(pFileKeys, 0, sizeof(DWORD) * ha->dwFileTableSize);
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // First of all, we have to check whether we are able to decrypt all files.
    // If not, sorry, but the archive cannot be compacted.
    if(nError == ERROR_SUCCESS)
    {
        // Initialize the progress variables for compact callback
        FileStream_GetSize(ha->pStream, &(ha->CompactTotalBytes));
        ha->CompactBytesProcessed = 0;
        nError = CheckIfAllFilesKnown(ha, szListFile, pFileKeys);
    }

    // Get the temporary file name and create it
    if(nError == ERROR_SUCCESS)
    {
        _tcscpy(szTempFile, FileStream_GetFileName(ha->pStream));
        if((szTemp = _tcsrchr(szTempFile, '.')) != NULL)
            _tcscpy(szTemp + 1, _T("mp_"));
        else
            _tcscat(szTempFile, _T("_"));

        pTempStream = FileStream_CreateFile(szTempFile, STREAM_PROVIDER_FLAT | BASE_PROVIDER_FILE);
        if(pTempStream == NULL)
            nError = GetLastError();
    }

    // Write the data before MPQ user data (if any)
    if(nError == ERROR_SUCCESS && ha->UserDataPos != 0)
    {
        // Inform the application about the progress
        if(ha->pfnCompactCB != NULL)
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COPYING_NON_MPQ_DATA, ha->CompactBytesProcessed, ha->CompactTotalBytes);

        ByteOffset = 0;
        ByteCount = ha->UserDataPos;
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, ByteOffset, ByteCount);
    }

    // Write the MPQ user data (if any)
    if(nError == ERROR_SUCCESS && ha->MpqPos > ha->UserDataPos)
    {
        // At this point, we assume that the user data size is equal
        // to pUserData->dwHeaderOffs.
        // If this assumption doesn't work, then we have an unknown version of MPQ
        ByteOffset = ha->UserDataPos;
        ByteCount = ha->MpqPos - ha->UserDataPos;

        assert(ha->pUserData != NULL);
        assert(ha->pUserData->dwHeaderOffs == ByteCount);
        nError = CopyNonMpqData(ha, ha->pStream, pTempStream, ByteOffset, ByteCount);
    }

    // Write the MPQ header
    if(nError == ERROR_SUCCESS)
    {
        TMPQHeader SaveMpqHeader;

        // Write the MPQ header to the file
        memcpy(&SaveMpqHeader, ha->pHeader, ha->pHeader->dwHeaderSize);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_1);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_2);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_3);
        BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_4);
        if(!FileStream_Write(pTempStream, NULL, &SaveMpqHeader, ha->pHeader->dwHeaderSize))
            nError = GetLastError();

        // Update the progress
        ha->CompactBytesProcessed += ha->pHeader->dwHeaderSize;
    }

    // Now copy all files
    if(nError == ERROR_SUCCESS)
        nError = CopyMpqFiles(ha, pFileKeys, pTempStream);

    // Defragment the file table
    if(nError == ERROR_SUCCESS)
        nError = RebuildFileTable(ha, ha->pHeader->dwHashTableSize, ha->dwMaxFileCount);

    // We also need to rebuild the HET table, if any
    if(nError == ERROR_SUCCESS)
    {
        // Invalidate (listfile) and (attributes)
        InvalidateInternalFiles(ha);

        // Rebuild the HET table, if we have any
        if(ha->pHetTable != NULL)
            nError = RebuildHetTable(ha);
    }

    // If succeeded, switch the streams
    if(nError == ERROR_SUCCESS)
    {
        if(FileStream_Replace(ha->pStream, pTempStream))
            pTempStream = NULL;
        else
            nError = ERROR_CAN_NOT_COMPLETE;
    }

    // If all succeeded, save the MPQ tables
    if(nError == ERROR_SUCCESS)
    {
        //
        // Note: We don't recalculate position of the MPQ tables at this point.
        // SaveMPQTables does it automatically.
        // 

        nError = SaveMPQTables(ha);
        if(nError == ERROR_SUCCESS && ha->pfnCompactCB != NULL)
        {
            ha->CompactBytesProcessed += (ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
            ha->CompactBytesProcessed += (ha->pHeader->dwBlockTableSize * sizeof(TMPQBlock));
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_CLOSING_ARCHIVE, ha->CompactBytesProcessed, ha->CompactTotalBytes);
        }
    }

    // Cleanup and return
    if(pTempStream != NULL)
        FileStream_Close(pTempStream);
    if(pFileKeys != NULL)
        STORM_FREE(pFileKeys);
    if(nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
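SFileCompactArchive rewrites the whole archive into a temporary ".mp_" file and swaps the streams on success, reporting progress through the compact callback (ha->pfnCompactCB) at each phase. A hedged driver sketch, assuming the usual SFileSetCompactCallback registration call and a callback signature matching the four-argument invocations seen above:

#include <stdio.h>
#include <StormLib.h>     // assumed public header name

// Progress callback matching the pfnCompactCB calls made by SFileCompactArchive.
static void WINAPI CompactProgress(void * pvUserData, DWORD dwWorkType,
                                   ULONGLONG BytesProcessed, ULONGLONG TotalBytes)
{
    (void)pvUserData; (void)dwWorkType;
    printf("Compacting: %llu / %llu bytes\n",
           (unsigned long long)BytesProcessed, (unsigned long long)TotalBytes);
}

// Sketch: compact an archive that was opened writable.
static bool CompactWithProgress(HANDLE hMpq)
{
    SFileSetCompactCallback(hMpq, CompactProgress, NULL);
    return SFileCompactArchive(hMpq, NULL, false);
}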
Code example #19
File: SFileAddFile.cpp Project: 3DViking/MistCore
static int WriteDataToMpqFile(
    TMPQArchive * ha,
    TMPQFile * hf,
    LPBYTE pbFileData,
    DWORD dwDataSize,
    DWORD dwCompression)
{
    TFileEntry * pFileEntry = hf->pFileEntry;
    ULONGLONG ByteOffset;
    LPBYTE pbCompressed = NULL;         // Compressed (target) data
    LPBYTE pbToWrite = NULL;            // Data to write to the file
    int nCompressionLevel = -1;         // ADPCM compression level (only used for wave files)
    int nError = ERROR_SUCCESS;

    // If the caller wants ADPCM compression, we will set wave compression level to 4,
    // which corresponds to medium quality
    if(dwCompression & LOSSY_COMPRESSION_MASK)
        nCompressionLevel = 4;

    // Make sure that the caller won't overrun the previously initiated file size
    assert(hf->dwFilePos + dwDataSize <= pFileEntry->dwFileSize);
    assert(hf->dwSectorCount != 0);
    assert(hf->pbFileSector != NULL);
    if((hf->dwFilePos + dwDataSize) > pFileEntry->dwFileSize)
        return ERROR_DISK_FULL;
    pbToWrite = hf->pbFileSector;

    // Now write all data to the file sector buffer
    if(nError == ERROR_SUCCESS)
    {
        DWORD dwBytesInSector = hf->dwFilePos % hf->dwSectorSize;
        DWORD dwSectorIndex = hf->dwFilePos / hf->dwSectorSize;
        DWORD dwBytesToCopy;

        // Process all data. 
        while(dwDataSize != 0)
        {
            dwBytesToCopy = dwDataSize;
                
            // Check for sector overflow
            if(dwBytesToCopy > (hf->dwSectorSize - dwBytesInSector))
                dwBytesToCopy = (hf->dwSectorSize - dwBytesInSector);

            // Copy the data to the file sector
            memcpy(hf->pbFileSector + dwBytesInSector, pbFileData, dwBytesToCopy);
            dwBytesInSector += dwBytesToCopy;
            pbFileData += dwBytesToCopy;
            dwDataSize -= dwBytesToCopy;

            // Update the file position
            hf->dwFilePos += dwBytesToCopy;

            // If the current sector is full, or if the file is already full,
            // then write the data to the MPQ
            if(dwBytesInSector >= hf->dwSectorSize || hf->dwFilePos >= pFileEntry->dwFileSize)
            {
                // Set the position in the file
                ByteOffset = hf->RawFilePos + pFileEntry->dwCmpSize;

                // Update CRC32 and MD5 of the file
                md5_process((hash_state *)hf->hctx, hf->pbFileSector, dwBytesInSector);
                hf->dwCrc32 = crc32(hf->dwCrc32, hf->pbFileSector, dwBytesInSector);

                // Compress the file sector, if needed
                if(pFileEntry->dwFlags & MPQ_FILE_COMPRESSED)
                {
                    int nOutBuffer = (int)dwBytesInSector;
                    int nInBuffer = (int)dwBytesInSector;

                    // If the file is compressed, allocate buffer for the compressed data.
                    // Note that we allocate a buffer slightly longer than the sector size,
                    // in case the compression method overruns the output buffer
                    if(pbCompressed == NULL)
                    {
                        pbToWrite = pbCompressed = STORM_ALLOC(BYTE, hf->dwSectorSize + 0x100);
                        if(pbCompressed == NULL)
                        {
                            nError = ERROR_NOT_ENOUGH_MEMORY;
                            break;
                        }
                    }

                    //
                    // Note that both SCompImplode and SCompCompress return the original buffer
                    // if they are unable to compress the data.
                    //

                    if(pFileEntry->dwFlags & MPQ_FILE_IMPLODE)
                    {
                        SCompImplode((char *)pbCompressed,
                                            &nOutBuffer,
                                     (char *)hf->pbFileSector,
                                             nInBuffer);
                    }

                    if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS)
                    {
                        SCompCompress((char *)pbCompressed,
                                             &nOutBuffer,
                                      (char *)hf->pbFileSector,
                                              nInBuffer,
                                    (unsigned)dwCompression,
                                              0,
                                              nCompressionLevel);
                    }

                    // Update sector positions
                    dwBytesInSector = nOutBuffer;
                    if(hf->SectorOffsets != NULL)
                        hf->SectorOffsets[dwSectorIndex+1] = hf->SectorOffsets[dwSectorIndex] + dwBytesInSector;

                    // We have to calculate sector CRC, if enabled
                    if(hf->SectorChksums != NULL)
                        hf->SectorChksums[dwSectorIndex] = adler32(0, pbCompressed, nOutBuffer);
                }                 

                // Encrypt the sector, if necessary
                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
                {
                    BSWAP_ARRAY32_UNSIGNED(pbToWrite, dwBytesInSector);
                    EncryptMpqBlock(pbToWrite, dwBytesInSector, hf->dwFileKey + dwSectorIndex);
                    BSWAP_ARRAY32_UNSIGNED(pbToWrite, dwBytesInSector);
                }

                // Write the file sector
                if(!FileStream_Write(ha->pStream, &ByteOffset, pbToWrite, dwBytesInSector))
                {
                    nError = GetLastError();
                    break;
                }

                // Call the compact callback, if any
                if(AddFileCB != NULL)
                    AddFileCB(pvUserData, hf->dwFilePos, hf->dwDataSize, false);

                // Update the compressed file size
                pFileEntry->dwCmpSize += dwBytesInSector;
                dwBytesInSector = 0;
                dwSectorIndex++;
            }
        }
    }

    // Cleanup
    if(pbCompressed != NULL)
        STORM_FREE(pbCompressed);
    return nError;
}
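WriteDataToMpqFile is the sector-buffering engine behind SFileWriteFile: it fills hf->pbFileSector and then compresses, encrypts and flushes one sector at a time. The caller-visible sequence is SFileCreateFile, one or more SFileWriteFile calls, then SFileFinishFile, exactly the sequence SFileAddFileEx uses above. A hedged sketch for storing an in-memory buffer; the archived name is hypothetical, and MPQ_FILE_REPLACEEXISTING plus MPQ_COMPRESSION_ZLIB are assumed to keep their usual StormLib values:

#include <StormLib.h>     // assumed public header name

// Sketch: store a memory buffer as "notes.txt" inside an opened, writable MPQ.
static bool WriteBufferToMpq(HANDLE hMpq, const void * pvData, DWORD cbData)
{
    HANDLE hFile = NULL;
    bool bResult = false;

    // The final file size must be declared up front; WriteDataToMpqFile enforces it
    if(SFileCreateFile(hMpq, "notes.txt", 0 /* file time */, cbData,
                       0 /* neutral locale */, MPQ_FILE_COMPRESS | MPQ_FILE_REPLACEEXISTING, &hFile))
    {
        bResult = SFileWriteFile(hFile, pvData, cbData, MPQ_COMPRESSION_ZLIB);
        bResult = SFileFinishFile(hFile) && bResult;
    }
    return bResult;
}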
Code example #20
File: SFileVerify.cpp Project: 3DViking/MistCore
static bool CalculateMpqHashMd5(
    TMPQArchive * ha,
    PMPQ_SIGNATURE_INFO pSI,
    LPBYTE pMd5Digest)
{
    hash_state md5_state;
    ULONGLONG BeginBuffer;
    ULONGLONG EndBuffer;
    LPBYTE pbDigestBuffer = NULL;

    // Allocate buffer for creating the MPQ digest.
    pbDigestBuffer = STORM_ALLOC(BYTE, MPQ_DIGEST_UNIT_SIZE);
    if(pbDigestBuffer == NULL)
        return false;

    // Initialize the MD5 hash state
    md5_init(&md5_state);

    // Set the byte offset of begin of the data
    BeginBuffer = pSI->BeginMpqData;

    // Create the digest
    for(;;)
    {
        ULONGLONG BytesRemaining;
        LPBYTE pbSigBegin = NULL;
        LPBYTE pbSigEnd = NULL;
        DWORD dwToRead = MPQ_DIGEST_UNIT_SIZE;

        // Check the number of bytes remaining
        BytesRemaining = pSI->EndMpqData - BeginBuffer;
        if(BytesRemaining < MPQ_DIGEST_UNIT_SIZE)
            dwToRead = (DWORD)BytesRemaining;
        if(dwToRead == 0)
            break;

        // Read the next chunk 
        if(!FileStream_Read(ha->pStream, &BeginBuffer, pbDigestBuffer, dwToRead))
        {
            STORM_FREE(pbDigestBuffer);
            return false;
        }

        // Move the current byte offset
        EndBuffer = BeginBuffer + dwToRead;

        // Check if the signature is within the loaded digest
        if(BeginBuffer <= pSI->BeginExclude && pSI->BeginExclude < EndBuffer)
            pbSigBegin = pbDigestBuffer + (size_t)(pSI->BeginExclude - BeginBuffer);
        if(BeginBuffer <= pSI->EndExclude && pSI->EndExclude < EndBuffer)
            pbSigEnd = pbDigestBuffer + (size_t)(pSI->EndExclude - BeginBuffer);

        // Zero the part that belongs to the signature
        if(pbSigBegin != NULL || pbSigEnd != NULL)
        {
            if(pbSigBegin == NULL)
                pbSigBegin = pbDigestBuffer;
            if(pbSigEnd == NULL)
                pbSigEnd = pbDigestBuffer + dwToRead;

            memset(pbSigBegin, 0, (pbSigEnd - pbSigBegin));
        }

        // Pass the buffer to the hashing function
        md5_process(&md5_state, pbDigestBuffer, dwToRead);

        // Move pointers
        BeginBuffer += dwToRead;
    }

    // Finalize the MD5 hash
    md5_done(&md5_state, pMd5Digest);
    STORM_FREE(pbDigestBuffer);
    return true;
}
Code example #21
//  hf            - MPQ File handle.
//  pbBuffer      - Pointer to target buffer to store sectors.
//  dwByteOffset  - Position of sector in the file (relative to file begin)
//  dwBytesToRead - Number of bytes to read. Must be a multiple of the sector size.
//  pdwBytesRead  - Stored number of bytes loaded
static int ReadMpqSectors(TMPQFile * hf, LPBYTE pbBuffer, DWORD dwByteOffset, DWORD dwBytesToRead, LPDWORD pdwBytesRead)
{
    ULONGLONG RawFilePos;
    TMPQArchive * ha = hf->ha;
    TFileEntry * pFileEntry = hf->pFileEntry;
    LPBYTE pbRawSector = NULL;
    LPBYTE pbOutSector = pbBuffer;
    LPBYTE pbInSector = pbBuffer;
    DWORD dwRawBytesToRead;
    DWORD dwRawSectorOffset = dwByteOffset;
    DWORD dwSectorsToRead = dwBytesToRead / ha->dwSectorSize;
    DWORD dwSectorIndex = dwByteOffset / ha->dwSectorSize;
    DWORD dwSectorsDone = 0;
    DWORD dwBytesRead = 0;
    int nError = ERROR_SUCCESS;

    // Note that dwByteOffset must be aligned to the size of one sector
    // Note that dwBytesToRead must be a multiple of the sector size
    // This is a local function, so we don't verify that here.
    // Note that files stored as single units are processed by a separate function

    // If there is not enough bytes remaining, cut dwBytesToRead
    if((dwByteOffset + dwBytesToRead) > hf->dwDataSize)
        dwBytesToRead = hf->dwDataSize - dwByteOffset;
    dwRawBytesToRead = dwBytesToRead;

    // Perform all necessary work to do with compressed files
    if(pFileEntry->dwFlags & MPQ_FILE_COMPRESSED)
    {
        // If the sector positions are not loaded yet, do it
        if(hf->SectorOffsets == NULL)
        {
            nError = AllocateSectorOffsets(hf, true);
            if(nError != ERROR_SUCCESS)
                return nError;
        }

        // If the sector checksums are not loaded yet, load them now.
        if(hf->SectorChksums == NULL && (pFileEntry->dwFlags & MPQ_FILE_SECTOR_CRC) && hf->bLoadedSectorCRCs == false)
        {
            //
            // Sector CRCs are a plain crap feature. They are almost never present,
            // the table is often empty, or the end offset of the sector CRCs is zero.
            // We only try to load sector CRCs once; regardless of whether that fails,
            // we won't try again for the given file.
            //

            AllocateSectorChecksums(hf, true);
            hf->bLoadedSectorCRCs = true;
        }

        // TODO: If the raw data MD5s are not loaded yet, load them now
        // Only do it if the MPQ is of format 4.0
//      if(ha->pHeader->wFormatVersion >= MPQ_FORMAT_VERSION_4 && ha->pHeader->dwRawChunkSize != 0)
//      {
//          nError = AllocateRawMD5s(hf, true);
//          if(nError != ERROR_SUCCESS)
//              return nError;
//      }

        // If the file is compressed, also allocate secondary buffer
        pbInSector = pbRawSector = STORM_ALLOC(BYTE, dwBytesToRead);
        if(pbRawSector == NULL)
            return ERROR_NOT_ENOUGH_MEMORY;

        // Assign the temporary buffer as target for read operation
        dwRawSectorOffset = hf->SectorOffsets[dwSectorIndex];
        dwRawBytesToRead = hf->SectorOffsets[dwSectorIndex + dwSectorsToRead] - dwRawSectorOffset;
    }

    // Calculate raw file offset where the sector(s) are stored.
    CalculateRawSectorOffset(RawFilePos, hf, dwRawSectorOffset);

    // Set file pointer and read all required sectors
    if(!FileStream_Read(ha->pStream, &RawFilePos, pbInSector, dwRawBytesToRead))
    {
        // Free the temporary raw sector buffer before returning, to avoid a leak
        if(pbRawSector != NULL)
            STORM_FREE(pbRawSector);
        return GetLastError();
    }
    dwBytesRead = 0;

    // Now we have to decrypt and decompress all file sectors that have been loaded
    for(DWORD i = 0; i < dwSectorsToRead; i++)
    {
        DWORD dwRawBytesInThisSector = ha->dwSectorSize;
        DWORD dwBytesInThisSector = ha->dwSectorSize;
        DWORD dwIndex = dwSectorIndex + i;

        // If there is not enough bytes in the last sector,
        // cut the number of bytes in this sector
        if(dwRawBytesInThisSector > dwBytesToRead)
            dwRawBytesInThisSector = dwBytesToRead;
        if(dwBytesInThisSector > dwBytesToRead)
            dwBytesInThisSector = dwBytesToRead;

        // If the file is compressed, we have to adjust the raw sector size
        if(pFileEntry->dwFlags & MPQ_FILE_COMPRESSED)
            dwRawBytesInThisSector = hf->SectorOffsets[dwIndex + 1] - hf->SectorOffsets[dwIndex];

        // If the file is encrypted, we have to decrypt the sector
        if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
        {
            BSWAP_ARRAY32_UNSIGNED(pbInSector, dwRawBytesInThisSector);

            // If we don't know the key, try to detect it by file content
            if(hf->dwFileKey == 0)
            {
                hf->dwFileKey = DetectFileKeyByContent(pbInSector, dwBytesInThisSector);
                if(hf->dwFileKey == 0)
                {
                    nError = ERROR_UNKNOWN_FILE_KEY;
                    break;
                }
            }

            DecryptMpqBlock(pbInSector, dwRawBytesInThisSector, hf->dwFileKey + dwIndex);
            BSWAP_ARRAY32_UNSIGNED(pbInSector, dwRawBytesInThisSector);
        }

        // If the file has sector CRC check turned on, perform it
        if(hf->bCheckSectorCRCs && hf->SectorChksums != NULL)
        {
            DWORD dwAdlerExpected = hf->SectorChksums[dwIndex];
            DWORD dwAdlerValue = 0;

            // We can only check sector CRC when it's not zero
            // Neither can we check it if it's 0xFFFFFFFF.
            if(dwAdlerExpected != 0 && dwAdlerExpected != 0xFFFFFFFF)
            {
                dwAdlerValue = adler32(0, pbInSector, dwRawBytesInThisSector);
                if(dwAdlerValue != dwAdlerExpected)
                {
                    nError = ERROR_CHECKSUM_ERROR;
                    break;
                }
            }
        }

        // If the sector is really compressed, decompress it.
        // WARNING : Some sectors may not be compressed, it can be determined only
        // by comparing uncompressed and compressed size !!!
        if(dwRawBytesInThisSector < dwBytesInThisSector)
        {
            int cbOutSector = dwBytesInThisSector;
            int cbInSector = dwRawBytesInThisSector;
            int nResult = 0;

            // Is the file compressed by PKWARE Data Compression Library ?
            if(pFileEntry->dwFlags & MPQ_FILE_IMPLODE)
                nResult = SCompExplode((char *)pbOutSector, &cbOutSector, (char *)pbInSector, cbInSector);

            // Is the file compressed by Blizzard's multiple compression ?
            if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS)
            {
                hf->PreviousCompression = pbInSector[0];
                nResult = SCompDecompress((char *)pbOutSector, &cbOutSector, (char *)pbInSector, cbInSector);
            }

            // Did the decompression fail ?
            if(nResult == 0)
            {
                nError = ERROR_FILE_CORRUPT;
                break;
            }
        }
        else
        {
            if(pbOutSector != pbInSector)
                memcpy(pbOutSector, pbInSector, dwBytesInThisSector);
        }

        // Move pointers
        dwBytesToRead -= dwBytesInThisSector;
        dwByteOffset += dwBytesInThisSector;
        dwBytesRead += dwBytesInThisSector;
        pbOutSector += dwBytesInThisSector;
        pbInSector += dwRawBytesInThisSector;
        dwSectorsDone++;
    }

    // Free all used buffers
    if(pbRawSector != NULL)
        STORM_FREE(pbRawSector);

    // Give the caller the number of bytes read
    *pdwBytesRead = dwBytesRead;
    return nError;
}
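ReadMpqSectors expects dwByteOffset to be sector-aligned and dwBytesToRead to be a multiple of the sector size; the general read path derives that window from the caller's arbitrary position before invoking it. An illustrative, self-contained sketch of that alignment step (not the verbatim library code):

#include <StormLib.h>     // assumed public header name, used here only for the DWORD typedef

// Sketch: compute the sector-aligned window covering an arbitrary (dwFilePos, dwToRead)
// request. dwSectorSize is a power of two, as guaranteed by the MPQ header.
static void AlignReadToSectors(DWORD dwSectorSize, DWORD dwFilePos, DWORD dwToRead,
                               DWORD * pdwByteOffset, DWORD * pdwBytesToRead)
{
    DWORD dwOffsetInSector = dwFilePos & (dwSectorSize - 1);    // offset inside the first sector

    *pdwByteOffset  = dwFilePos - dwOffsetInSector;             // sector-aligned start
    *pdwBytesToRead = (dwOffsetInSector + dwToRead + dwSectorSize - 1)
                    & ~(dwSectorSize - 1);                      // span rounded up to whole sectors
}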
Code example #22
File: SFileVerify.cpp Project: 3DViking/MistCore
static bool CalculateMpqHashSha1(
    TMPQArchive * ha,
    PMPQ_SIGNATURE_INFO pSI,
    unsigned char * sha1_tail0,
    unsigned char * sha1_tail1,
    unsigned char * sha1_tail2)
{
    ULONGLONG BeginBuffer;
    hash_state sha1_state_temp;
    hash_state sha1_state;
    LPBYTE pbDigestBuffer = NULL;
    char szPlainName[MAX_PATH];

    // Allocate buffer for creating the MPQ digest.
    pbDigestBuffer = STORM_ALLOC(BYTE, MPQ_DIGEST_UNIT_SIZE);
    if(pbDigestBuffer == NULL)
        return false;

    // Initialize SHA1 state structure
    sha1_init(&sha1_state);

    // Calculate begin of data to be hashed
    BeginBuffer = pSI->BeginMpqData;

    // Create the digest
    for(;;)
    {
        ULONGLONG BytesRemaining;
        DWORD dwToRead = MPQ_DIGEST_UNIT_SIZE;

        // Check the number of bytes remaining
        BytesRemaining = pSI->EndMpqData - BeginBuffer;
        if(BytesRemaining < MPQ_DIGEST_UNIT_SIZE)
            dwToRead = (DWORD)BytesRemaining;
        if(dwToRead == 0)
            break;

        // Read the next chunk 
        if(!FileStream_Read(ha->pStream, &BeginBuffer, pbDigestBuffer, dwToRead))
        {
            STORM_FREE(pbDigestBuffer);
            return false;
        }

        // Pass the buffer to the hashing function
        sha1_process(&sha1_state, pbDigestBuffer, dwToRead);

        // Move pointers
        BeginBuffer += dwToRead;
    }

    // Add all three known tails and generate three hashes
    memcpy(&sha1_state_temp, &sha1_state, sizeof(hash_state));
    sha1_done(&sha1_state_temp, sha1_tail0);

    memcpy(&sha1_state_temp, &sha1_state, sizeof(hash_state));
    GetPlainAnsiFileName(FileStream_GetFileName(ha->pStream), szPlainName);
    AddTailToSha1(&sha1_state_temp, szPlainName);
    sha1_done(&sha1_state_temp, sha1_tail1);

    memcpy(&sha1_state_temp, &sha1_state, sizeof(hash_state));
    AddTailToSha1(&sha1_state_temp, "ARCHIVE");
    sha1_done(&sha1_state_temp, sha1_tail2);

    // Finalize the MD5 hash
    STORM_FREE(pbDigestBuffer);
    return true;
}
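Both digest routines (the MD5 above for the weak signature, this triple-tail SHA-1 for the strong signature) hash the archive with the signature block itself zeroed out or excluded, so the stored signature can be checked against freshly computed data. The public wrapper that drives them in stock StormLib is SFileVerifyArchive; the return constants used below are assumptions based on the usual header:

#include <stdio.h>
#include <StormLib.h>     // assumed public header name

// Sketch: verify whatever digital signature the opened archive carries.
static void CheckSignature(HANDLE hMpq)
{
    DWORD dwResult = SFileVerifyArchive(hMpq);

    switch(dwResult)
    {
        case ERROR_NO_SIGNATURE:        printf("Archive is not signed\n");                      break;
        case ERROR_WEAK_SIGNATURE_OK:   printf("Weak (MD5 / 512-bit RSA) signature OK\n");      break;
        case ERROR_STRONG_SIGNATURE_OK: printf("Strong (SHA-1 / 2048-bit RSA) signature OK\n"); break;
        default:                        printf("Signature verification failed (%u)\n", (unsigned)dwResult); break;
    }
}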
Code example #23
File: SFileVerify.cpp Project: 3DViking/MistCore
static int VerifyRawMpqData(
    TMPQArchive * ha,
    ULONGLONG ByteOffset,
    DWORD dwDataSize)
{
    ULONGLONG DataOffset = ha->MpqPos + ByteOffset;
    LPBYTE pbDataChunk;
    LPBYTE pbMD5Array1;                 // Calculated MD5 array
    LPBYTE pbMD5Array2;                 // MD5 array loaded from the MPQ
    DWORD dwBytesInChunk;
    DWORD dwChunkCount;
    DWORD dwChunkSize = ha->pHeader->dwRawChunkSize;
    DWORD dwMD5Size;
    int nError = ERROR_SUCCESS;

    // Don't verify zero-sized blocks
    if(dwDataSize == 0)
        return ERROR_SUCCESS;

    // Get the number of data chunks to calculate MD5
    assert(dwChunkSize != 0);
    dwChunkCount = ((dwDataSize - 1) / dwChunkSize) + 1;
    dwMD5Size = dwChunkCount * MD5_DIGEST_SIZE;

    // Allocate space for data chunk and for the MD5 array
    pbDataChunk = STORM_ALLOC(BYTE, dwChunkSize);
    if(pbDataChunk == NULL)
        return ERROR_NOT_ENOUGH_MEMORY;

    // Allocate space for MD5 array
    pbMD5Array1 = STORM_ALLOC(BYTE, dwMD5Size);
    pbMD5Array2 = STORM_ALLOC(BYTE, dwMD5Size);
    if(pbMD5Array1 == NULL || pbMD5Array2 == NULL)
        nError = ERROR_NOT_ENOUGH_MEMORY;

    // Calculate MD5 of each data chunk
    if(nError == ERROR_SUCCESS)
    {
        LPBYTE pbMD5 = pbMD5Array1;

        for(DWORD i = 0; i < dwChunkCount; i++)
        {
            // Get the number of bytes in the chunk
            dwBytesInChunk = STORMLIB_MIN(dwChunkSize, dwDataSize);

            // Read the data chunk
            if(!FileStream_Read(ha->pStream, &DataOffset, pbDataChunk, dwBytesInChunk))
            {
                nError = ERROR_FILE_CORRUPT;
                break;
            }

            // Calculate MD5
            CalculateDataBlockHash(pbDataChunk, dwBytesInChunk, pbMD5);

            // Move pointers and offsets
            DataOffset += dwBytesInChunk;
            dwDataSize -= dwBytesInChunk;
            pbMD5 += MD5_DIGEST_SIZE;
        }
    }

    // Read the MD5 array
    if(nError == ERROR_SUCCESS)
    {
        // Read the array of MD5
        if(!FileStream_Read(ha->pStream, &DataOffset, pbMD5Array2, dwMD5Size))
            nError = GetLastError();
    }

    // Compare the array of MD5
    if(nError == ERROR_SUCCESS)
    {
        // Compare the MD5
        if(memcmp(pbMD5Array1, pbMD5Array2, dwMD5Size))
            nError = ERROR_FILE_CORRUPT;
    }

    // Free memory and return result
    if(pbMD5Array2 != NULL)
        STORM_FREE(pbMD5Array2);
    if(pbMD5Array1 != NULL)
        STORM_FREE(pbMD5Array1);
    if(pbDataChunk != NULL)
        STORM_FREE(pbDataChunk);
    return nError;
}
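VerifyRawMpqData checks the per-chunk MD5 list that MPQ format 4 appends after each raw data block (chunk size taken from dwRawChunkSize in the header). In stock StormLib this is typically reached through SFileVerifyRawData; the SFILE_VERIFY_* selectors and the example file name below are assumptions:

#include <stdio.h>
#include <StormLib.h>     // assumed public header name

// Sketch: verify the raw-data MD5 protection of the MPQ header and of one file.
static void VerifyRawParts(HANDLE hMpq)
{
    if(SFileVerifyRawData(hMpq, SFILE_VERIFY_MPQ_HEADER, NULL) != ERROR_SUCCESS)
        printf("Raw MD5 of the MPQ header does not match\n");

    if(SFileVerifyRawData(hMpq, SFILE_VERIFY_FILE, "war3map.j") != ERROR_SUCCESS)   // hypothetical file name
        printf("Raw MD5 of the file does not match\n");
}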
Code example #24
File: SFileOpenArchive.cpp Project: dogganz/StormLib
bool WINAPI SFileOpenArchive(
    const TCHAR * szMpqName,
    DWORD dwPriority,
    DWORD dwFlags,
    HANDLE * phMpq)
{
    TMPQUserData * pUserData;
    TFileStream * pStream = NULL;       // Open file stream
    TMPQArchive * ha = NULL;            // Archive handle
    TFileEntry * pFileEntry;
    ULONGLONG FileSize = 0;             // Size of the file
    LPBYTE pbHeaderBuffer = NULL;       // Buffer for searching MPQ header
    DWORD dwStreamFlags = (dwFlags & STREAM_FLAGS_MASK);
    bool bIsWarcraft3Map = false;
    int nError = ERROR_SUCCESS;   

    // Verify the parameters
    if(szMpqName == NULL || *szMpqName == 0 || phMpq == NULL)
    {
        SetLastError(ERROR_INVALID_PARAMETER);
        return false;
    }

    // One time initialization of MPQ cryptography
    InitializeMpqCryptography();
    dwPriority = dwPriority;            // dwPriority is unused; self-assignment silences the compiler warning

    // If not forcing MPQ v 1.0, also use file bitmap
    dwStreamFlags |= (dwFlags & MPQ_OPEN_FORCE_MPQ_V1) ? 0 : STREAM_FLAG_USE_BITMAP;

    // Open the MPQ archive file
    pStream = FileStream_OpenFile(szMpqName, dwStreamFlags);
    if(pStream == NULL)
        return false;

    // Check the file size. There must be at least 0x20 bytes
    if(nError == ERROR_SUCCESS)
    {
        FileStream_GetSize(pStream, &FileSize);
        if(FileSize < MPQ_HEADER_SIZE_V1)
            nError = ERROR_BAD_FORMAT;
    }

    // Allocate the MPQ handle
    if(nError == ERROR_SUCCESS)
    {
        if((ha = STORM_ALLOC(TMPQArchive, 1)) == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Allocate buffer for searching MPQ header
    if(nError == ERROR_SUCCESS)
    {
        pbHeaderBuffer = STORM_ALLOC(BYTE, HEADER_SEARCH_BUFFER_SIZE);
        if(pbHeaderBuffer == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Find the position of MPQ header
    if(nError == ERROR_SUCCESS)
    {
        ULONGLONG SearchOffset = 0;
        ULONGLONG EndOfSearch = FileSize;
        DWORD dwStrmFlags = 0;
        DWORD dwHeaderSize;
        DWORD dwHeaderID;
        bool bSearchComplete = false;

        memset(ha, 0, sizeof(TMPQArchive));
        ha->pfnHashString = HashStringSlash;
        ha->pStream = pStream;
        pStream = NULL;

        // Set the archive read only if the stream is read-only
        FileStream_GetFlags(ha->pStream, &dwStrmFlags);
        ha->dwFlags |= (dwStrmFlags & STREAM_FLAG_READ_ONLY) ? MPQ_FLAG_READ_ONLY : 0;

        // Also remember if we shall check sector CRCs when reading file
        ha->dwFlags |= (dwFlags & MPQ_OPEN_CHECK_SECTOR_CRC) ? MPQ_FLAG_CHECK_SECTOR_CRC : 0;

        // Also remember if this MPQ is a patch
        ha->dwFlags |= (dwFlags & MPQ_OPEN_PATCH) ? MPQ_FLAG_PATCH : 0;
       
        // Limit the header searching to about 130 MB of data
        if(EndOfSearch > 0x08000000)
            EndOfSearch = 0x08000000;

        // Find the offset of MPQ header within the file
        while(bSearchComplete == false && SearchOffset < EndOfSearch)
        {
            // Always read at least 0x1000 bytes for performance.
            // This is what Storm.dll (2002) does.
            DWORD dwBytesAvailable = HEADER_SEARCH_BUFFER_SIZE;
            DWORD dwInBufferOffset = 0;

            // Cut the bytes available, if needed
            if((FileSize - SearchOffset) < HEADER_SEARCH_BUFFER_SIZE)
                dwBytesAvailable = (DWORD)(FileSize - SearchOffset);

            // Read the data that may contain the MPQ header
            if(!FileStream_Read(ha->pStream, &SearchOffset, pbHeaderBuffer, dwBytesAvailable))
            {
                nError = GetLastError();
                break;
            }

            // There are AVI files from Warcraft III with 'MPQ' extension.
            if(SearchOffset == 0)
            {
                if(IsAviFile((DWORD *)pbHeaderBuffer))
                {
                    nError = ERROR_AVI_FILE;
                    break;
                }

                bIsWarcraft3Map = IsWarcraft3Map((DWORD *)pbHeaderBuffer);
            }

            // Search the header buffer
            while(dwInBufferOffset < dwBytesAvailable)
            {
                // Copy the data from the potential header buffer to the MPQ header
                memcpy(ha->HeaderData, pbHeaderBuffer + dwInBufferOffset, sizeof(ha->HeaderData));

                // If there is the MPQ user data, process it
                // Note that Warcraft III does not check for user data, which is abused by many map protectors
                dwHeaderID = BSWAP_INT32_UNSIGNED(ha->HeaderData[0]);
                if(bIsWarcraft3Map == false && (dwFlags & MPQ_OPEN_FORCE_MPQ_V1) == 0)
                {
                    if(ha->pUserData == NULL && dwHeaderID == ID_MPQ_USERDATA)
                    {
                        // Verify whether this looks like valid user data
                        pUserData = IsValidMpqUserData(SearchOffset, FileSize, ha->HeaderData);
                        if(pUserData != NULL)
                        {
                            // Fill the user data header
                            ha->UserDataPos = SearchOffset;
                            ha->pUserData = &ha->UserData;
                            memcpy(ha->pUserData, pUserData, sizeof(TMPQUserData));

                            // Continue searching from that position
                            SearchOffset += ha->pUserData->dwHeaderOffs;
                            break;
                        }
                    }
                }

                // There must be the MPQ header signature. Note that STORM.dll from Warcraft III actually
                // tests the MPQ header size: it must be at least 0x20 bytes in order to load it.
                // This is abused by the Spazzler map protector. Note that the size check is not present
                // in Storm.dll v 1.00, so Diablo I code would load the MPQ anyway.
                dwHeaderSize = BSWAP_INT32_UNSIGNED(ha->HeaderData[1]);
                if(dwHeaderID == ID_MPQ && dwHeaderSize >= MPQ_HEADER_SIZE_V1)
                {
                    // Now convert the header to version 4
                    nError = ConvertMpqHeaderToFormat4(ha, SearchOffset, FileSize, dwFlags);
                    bSearchComplete = true;
                    break;
                }

                // Check for MPK archives (Longwu Online - MPQ fork)
                if(dwHeaderID == ID_MPK)
                {
                    // Now convert the MPK header to MPQ Header version 4
                    nError = ConvertMpkHeaderToFormat4(ha, FileSize, dwFlags);
                    bSearchComplete = true;
                    break;
                }

                // If searching for the MPQ header is disabled, return an error
                if(dwFlags & MPQ_OPEN_NO_HEADER_SEARCH)
                {
                    nError = ERROR_NOT_SUPPORTED;
                    bSearchComplete = true;
                    break;
                }

                // Move to the next possible header position; MPQ headers are aligned to 0x200 bytes
                SearchOffset += 0x200;
                dwInBufferOffset += 0x200;
            }
        }

        // Did we identify one of the supported headers?
        if(nError == ERROR_SUCCESS)
        {
            // Set the user data position to the MPQ header, if none
            if(ha->pUserData == NULL)
                ha->UserDataPos = SearchOffset;

            // Set the position of the MPQ header
            ha->pHeader  = (TMPQHeader *)ha->HeaderData;
            ha->MpqPos   = SearchOffset;
            ha->FileSize = FileSize;

            // Sector size must be nonzero.
            if(SearchOffset >= FileSize || ha->pHeader->wSectorSize == 0)
                nError = ERROR_BAD_FORMAT;
        }
    }

    // Fix table positions according to format
    if(nError == ERROR_SUCCESS)
    {
        // Dump the header
//      DumpMpqHeader(ha->pHeader);

        // W3x map protectors use the fact that War3's Storm.dll ignores the MPQ user data
        // and ignores the MPQ format version as well. The trick is to
        // fake MPQ format 2 with an improper hi-word position of the hash table and block table.
        // We can overcome such protectors by forcing the archive to be opened as MPQ v 1.0
        if(dwFlags & MPQ_OPEN_FORCE_MPQ_V1)
        {
            ha->pHeader->wFormatVersion = MPQ_FORMAT_VERSION_1;
            ha->pHeader->dwHeaderSize = MPQ_HEADER_SIZE_V1;
            ha->dwFlags |= MPQ_FLAG_READ_ONLY;
            ha->pUserData = NULL;
        }

        // Either MPQ_OPEN_NO_LISTFILE or MPQ_OPEN_NO_ATTRIBUTES triggers read-only mode
        if(dwFlags & (MPQ_OPEN_NO_LISTFILE | MPQ_OPEN_NO_ATTRIBUTES))
            ha->dwFlags |= MPQ_FLAG_READ_ONLY;

        // Remember whether this is a Warcraft III map
        if(bIsWarcraft3Map)
            ha->dwFlags |= MPQ_FLAG_WAR3_MAP;

        // Set the size of a file sector (0x200 << wSectorSize; the common value 3 gives 0x1000-byte sectors)
        ha->dwSectorSize = (0x200 << ha->pHeader->wSectorSize);

        // Verify that none of the tables starts beyond the end of the file
        nError = VerifyMpqTablePositions(ha, FileSize);
    }

    // Read the hash table and/or the HET table, whichever is present in the archive
    if(nError == ERROR_SUCCESS)
    {
        nError = LoadAnyHashTable(ha);
    }

    // Now, build the file table. It will be built by combining
    // the block table, BET table, hi-block table, (attributes) and (listfile).
    if(nError == ERROR_SUCCESS)
    {
        nError = BuildFileTable(ha);
    }

    // Load the internal listfile and include it to the file table
    if(nError == ERROR_SUCCESS && (dwFlags & MPQ_OPEN_NO_LISTFILE) == 0)
    {
        // Quick check for (listfile)
        pFileEntry = GetFileEntryLocale(ha, LISTFILE_NAME, LANG_NEUTRAL);
        if(pFileEntry != NULL)
        {
            // Ignore the result of the operation; the (listfile) is optional.
            SFileAddListFile((HANDLE)ha, NULL);
            ha->dwFileFlags1 = pFileEntry->dwFlags;
        }
    }

    // Load the "(attributes)" file and merge it to the file table
    if(nError == ERROR_SUCCESS && (dwFlags & MPQ_OPEN_NO_ATTRIBUTES) == 0 && (ha->dwFlags & MPQ_FLAG_BLOCK_TABLE_CUT) == 0)
    {
        // Quick check for (attributes)
        pFileEntry = GetFileEntryLocale(ha, ATTRIBUTES_NAME, LANG_NEUTRAL);
        if(pFileEntry != NULL)
        {
            // Ignore the result of the operation; the (attributes) file is optional.
            SAttrLoadAttributes(ha);
            ha->dwFileFlags2 = pFileEntry->dwFlags;
        }
    }

    // Remember whether the archive has a weak signature. Only for MPQ format 1.0.
    if(nError == ERROR_SUCCESS)
    {
        // Quick check for (signature)
        pFileEntry = GetFileEntryLocale(ha, SIGNATURE_NAME, LANG_NEUTRAL);
        if(pFileEntry != NULL)
        {
            // Just remember that the archive is weak-signed
            assert((pFileEntry->dwFlags & MPQ_FILE_EXISTS) != 0);
            ha->dwFileFlags3 = pFileEntry->dwFlags;
        }

        // Finally, set the MPQ_FLAG_READ_ONLY if the MPQ was found malformed
        ha->dwFlags |= (ha->dwFlags & MPQ_FLAG_MALFORMED) ? MPQ_FLAG_READ_ONLY : 0;
    }

    // Cleanup and exit
    if(nError != ERROR_SUCCESS)
    {
        FileStream_Close(pStream);
        FreeArchiveHandle(ha);
        SetLastError(nError);
        ha = NULL;
    }

    // Free the header buffer
    if(pbHeaderBuffer != NULL)
        STORM_FREE(pbHeaderBuffer);
    if(phMpq != NULL)
        *phMpq = ha;
    return (nError == ERROR_SUCCESS);
}
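
The function above is the internal worker behind StormLib's archive-open path, so from the caller's side all of the flags it inspects (MPQ_OPEN_FORCE_MPQ_V1, MPQ_OPEN_NO_LISTFILE, MPQ_OPEN_NO_ATTRIBUTES, MPQ_OPEN_NO_HEADER_SEARCH, ...) arrive through the public SFileOpenArchive entry point. Below is a minimal usage sketch, assuming the public StormLib prototypes SFileOpenArchive(name, priority, flags, &handle) and SFileCloseArchive(handle); the archive name and the narrow-string usage are illustrative, so check the exact prototypes against StormLib.h.

#include <stdio.h>
#include "StormLib.h"

static int OpenArchiveReadOnly(const char * szMpqName)
{
    HANDLE hMpq = NULL;

    /* Skip (listfile) and (attributes); per the code above, either flag forces read-only mode */
    if(!SFileOpenArchive(szMpqName, 0, MPQ_OPEN_NO_LISTFILE | MPQ_OPEN_NO_ATTRIBUTES, &hMpq))
    {
        /* On failure, the open routine stores the error code via SetLastError */
        printf("Cannot open %s, error %u\n", szMpqName, (unsigned)GetLastError());
        return 1;
    }

    /* ... read files from the archive here ... */

    SFileCloseArchive(hMpq);
    return 0;
}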
コード例 #25
0
/* address can be 0 */
static void LZMA_Callback_Free(void *p, void *address)
{
    p = p;                      /* suppress the unused-parameter warning */
    if(address != NULL)
        STORM_FREE(address);
}
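
LZMA_Callback_Free has the shape of the Free member of the LZMA SDK's ISzAlloc allocator interface, which the LZMA coder calls back into instead of using malloc/free directly. A minimal sketch of the matching allocation callback and how the pair is typically wired up follows, assuming the classic 7-Zip SDK ISzAlloc layout; the name LZMA_Callback_Alloc and the wiring shown are illustrative, not necessarily the exact code used by this project.

/* Allocation counterpart: same shape as ISzAlloc::Alloc, i.e. void *(*Alloc)(void *p, size_t size) */
static void * LZMA_Callback_Alloc(void *p, size_t size)
{
    p = p;                      /* the allocator context is unused here */
    return STORM_ALLOC(uint8_t, size);
}

/* The two callbacks are then handed to the LZMA coder as a pair:
 *
 *     ISzAlloc Allocator = { LZMA_Callback_Alloc, LZMA_Callback_Free };
 *
 * and a pointer to "Allocator" is passed as the allocator argument of the
 * LZMA encode/decode calls, which use it for all temporary buffers. */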
コード例 #26
0
/* Copies all file sectors into another archive. */
static int CopyMpqFileSectors(
    TMPQArchive * ha,
    TMPQFile * hf,
    TFileStream * pNewStream,
    uint64_t MpqFilePos)               /* MPQ file position in the new archive */
{
    TFileEntry * pFileEntry = hf->pFileEntry;
    uint64_t RawFilePos;               /* Used for calculating sector offset in the old MPQ archive */
    uint32_t dwBytesToCopy = pFileEntry->dwCmpSize;
    uint32_t dwPatchSize = 0;              /* Size of patch header */
    uint32_t dwFileKey1 = 0;               /* File key used for decryption */
    uint32_t dwFileKey2 = 0;               /* File key used for encryption */
    uint32_t dwCmpSize = 0;                /* Compressed file size, including patch header */
    int nError = ERROR_SUCCESS;

    /* Resolve decryption keys. Note that the file key given */
    /* in the TMPQFile structure also includes the key adjustment */
    if(nError == ERROR_SUCCESS && (pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED))
    {
        dwFileKey2 = dwFileKey1 = hf->dwFileKey;
        if(pFileEntry->dwFlags & MPQ_FILE_FIX_KEY)
        {
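            /* MPQ_FILE_FIX_KEY: the stored key is adjusted by the file position, so first
             * undo the adjustment for the old offset to recover the base key, then
             * re-apply it for the new position of the file in the target archive */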
            dwFileKey2 = (dwFileKey1 ^ pFileEntry->dwFileSize) - (uint32_t)pFileEntry->ByteOffset;
            dwFileKey2 = (dwFileKey2 + (uint32_t)MpqFilePos) ^ pFileEntry->dwFileSize;
        }
    }

    /* If we have to save patch header, do it */
    if(nError == ERROR_SUCCESS && hf->pPatchInfo != NULL)
    {
        BSWAP_ARRAY32_UNSIGNED(hf->pPatchInfo, sizeof(uint32_t) * 3);
        if(!FileStream_Write(pNewStream, NULL, hf->pPatchInfo, hf->pPatchInfo->dwLength))
            nError = GetLastError();

        /* Save the size of the patch info */
        dwPatchSize = hf->pPatchInfo->dwLength;
    }

    /* If we have to save the sector offset table, do it. */
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL)
    {
        uint32_t * SectorOffsetsCopy = STORM_ALLOC(uint32_t, hf->SectorOffsets[0] / sizeof(uint32_t));
        uint32_t dwSectorOffsLen = hf->SectorOffsets[0];

        assert((pFileEntry->dwFlags & MPQ_FILE_SINGLE_UNIT) == 0);
        assert(pFileEntry->dwFlags & MPQ_FILE_COMPRESS_MASK);

        if(SectorOffsetsCopy == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;

        /* Encrypt the copy of the sector offset table and write it to the target file */
        if(nError == ERROR_SUCCESS)
        {
            memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorOffsLen);
            if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
                EncryptMpqBlock(SectorOffsetsCopy, dwSectorOffsLen, dwFileKey2 - 1);

            BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorOffsLen);
            if(!FileStream_Write(pNewStream, NULL, SectorOffsetsCopy, dwSectorOffsLen))
                nError = GetLastError();

            dwBytesToCopy -= dwSectorOffsLen;
            dwCmpSize += dwSectorOffsLen;
        }

        /* Update compact progress */
        if(ha->pfnCompactCB != NULL)
        {
            ha->CompactBytesProcessed += dwSectorOffsLen;
            ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
        }

        STORM_FREE(SectorOffsetsCopy);
    }

    /* Now we have to copy all file sectors. We do it without */
    /* recompression, because recompression is not necessary in this case */
    if(nError == ERROR_SUCCESS)
    {
        uint32_t dwSector;
        
        for(dwSector = 0; dwSector < hf->dwSectorCount; dwSector++)
        {
            uint32_t dwRawDataInSector = hf->dwSectorSize;
            uint32_t dwRawByteOffset = dwSector * hf->dwSectorSize;

            /* Fix the raw data length if the file is compressed */
            if(hf->SectorOffsets != NULL)
            {
                dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
                dwRawByteOffset = hf->SectorOffsets[dwSector];
            }

            /* Last sector: If there are not enough bytes remaining in the file, cut the raw size */
            if(dwRawDataInSector > dwBytesToCopy)
                dwRawDataInSector = dwBytesToCopy;

            /* Calculate the raw file offset of the file sector */
            RawFilePos = CalculateRawSectorOffset(hf, dwRawByteOffset);
            
            /* Read the file sector */
            if(!FileStream_Read(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            /* If necessary, re-encrypt the sector */
            /* Note: Recompression is not necessary here. Unlike encryption, */
            /* the compression does not depend on the position of the file in MPQ. */
            if((pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED) && dwFileKey1 != dwFileKey2)
            {
                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_SERPENT)
                    DecryptMpqBlockSerpent(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleSerpent));

                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_ANUBIS)
                    DecryptMpqBlockAnubis(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleAnubis));
                
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
                DecryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey1 + dwSector);
                EncryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwFileKey2 + dwSector);
                BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
                
                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_ANUBIS)
                    EncryptMpqBlockAnubis(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleAnubis));

                if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPT_SERPENT)
                    EncryptMpqBlockSerpent(hf->pbFileSector, dwRawDataInSector, &(ha->keyScheduleSerpent));
            }

            /* Now write the sector back to the file */
            if(!FileStream_Write(pNewStream, NULL, hf->pbFileSector, dwRawDataInSector))
            {
                nError = GetLastError();
                break;
            }

            /* Update compact progress */
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwRawDataInSector;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            /* Adjust byte counts */
            dwBytesToCopy -= dwRawDataInSector;
            dwCmpSize += dwRawDataInSector;
        }
    }

    /* Copy the sector CRCs, if any */
    /* Sector CRCs are always compressed (not imploded) and unencrypted */
    if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL && hf->SectorChksums != NULL)
    {
        uint32_t dwCrcLength;

        dwCrcLength = hf->SectorOffsets[hf->dwSectorCount + 1] - hf->SectorOffsets[hf->dwSectorCount];
        if(dwCrcLength != 0)
        {
            if(!FileStream_Read(ha->pStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, hf->SectorChksums, dwCrcLength))
                nError = GetLastError();

            /* Update compact progress */
            if(ha->pfnCompactCB != NULL)
            {
                ha->CompactBytesProcessed += dwCrcLength;
                ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
            }

            /* Size of the CRC block is also included in the compressed file size */
            dwBytesToCopy -= dwCrcLength;
            dwCmpSize += dwCrcLength;
        }
    }

    /* There might be extra data beyond the sector checksum table.
     * Sometimes this data is even part of the sector offset table.
     * Examples:
     * 2012 - WoW\15354\locale-enGB.MPQ:DBFilesClient\SpellLevels.dbc
     * 2012 - WoW\15354\locale-enGB.MPQ:Interface\AddOns\Blizzard_AuctionUI\Blizzard_AuctionUI.xml
     */
    if(nError == ERROR_SUCCESS && dwBytesToCopy != 0)
    {
        unsigned char * pbExtraData;

        /* Allocate space for the extra data */
        pbExtraData = STORM_ALLOC(uint8_t, dwBytesToCopy);
        if(pbExtraData != NULL)
        {
            if(!FileStream_Read(ha->pStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            if(!FileStream_Write(pNewStream, NULL, pbExtraData, dwBytesToCopy))
                nError = GetLastError();

            /* Include these extra data in the compressed size */
            dwCmpSize += dwBytesToCopy;
            STORM_FREE(pbExtraData);
        }
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    /* Write the MD5s of the raw file data, if needed */
    if(nError == ERROR_SUCCESS && ha->pHeader->dwRawChunkSize != 0)
    {
        nError = WriteMpqDataMD5(pNewStream, 
                                 ha->MpqPos + MpqFilePos,
                                 pFileEntry->dwCmpSize,
                                 ha->pHeader->dwRawChunkSize);
    }

    /* Verify the number of bytes written */
    if(nError == ERROR_SUCCESS)
    {
        /* At this point, the number of bytes written should be exactly
         * the same as the compressed file size. If it isn't,
         * there's something wrong (an unknown archive version, MPQ malformation, ...)
         *
         * Note: Diablo savegames have a very weird layout, and the file "hero"
         * seems to have an improper compressed size. Instead of the real compressed size,
         * the "dwCmpSize" member of the block table entry contains the
         * uncompressed size of the file data + the size of the sector table.
         * If we compact the archive, Diablo will refuse to load the game.
         *
         * Note: Some patch files in WoW patches don't count the patch header
         * into the compressed size
         */

        if(!(dwCmpSize <= pFileEntry->dwCmpSize && pFileEntry->dwCmpSize <= dwCmpSize + dwPatchSize))
        {
            nError = ERROR_FILE_CORRUPT;
            assert(0);
        }
    }

    return nError;
}
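
CopyMpqFileSectors appears to be the per-file worker used while compacting an archive: the pfnCompactCB/CompactBytesProcessed fields it updates are the progress plumbing of StormLib's compaction API. A minimal sketch of driving that progress callback from the caller's side follows, assuming the public entry points SFileSetCompactCallback(hMpq, callback, userdata) and SFileCompactArchive(hMpq, listfile, reserved); treat the exact signatures as an assumption to verify against StormLib.h.

#include <stdio.h>
#include "StormLib.h"

/* Progress callback: dwWorkType is one of the CCB_* phases, e.g. CCB_COMPACTING_FILES above */
static void WINAPI CompactProgress(void * pvUserData, DWORD dwWorkType, ULONGLONG BytesProcessed, ULONGLONG TotalBytes)
{
    pvUserData = pvUserData;                /* no per-call context needed in this sketch */
    printf("compacting: phase %u, %llu / %llu bytes\n",
           (unsigned)dwWorkType,
           (unsigned long long)BytesProcessed,
           (unsigned long long)TotalBytes);
}

static bool CompactWithProgress(HANDLE hMpq)
{
    SFileSetCompactCallback(hMpq, CompactProgress, NULL);
    return SFileCompactArchive(hMpq, NULL, false);      /* NULL: no external listfile */
}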