Example #1
bool WINAPI SFileSetHashTableSize(HANDLE hMpq, DWORD dwNewTableSize)
{
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    TFileEntry * pFileTableEnd = ha->pFileTable + ha->dwFileTableSize;
    TFileEntry * pFileEntry;
    TMPQHash * pOldHashTable = NULL;
    TMPQHash * pNewHashTable = NULL;
    TMPQHash * pTableToFree = NULL;
    TMPQHash * pHash = NULL;
    DWORD dwOldHashTableSize = 0;
    int nError = ERROR_SUCCESS;

    // Validate the parameters
    if (!IsValidMpqHandle(ha))
        nError = ERROR_INVALID_HANDLE;
    if (ha->dwFlags & MPQ_FLAG_READ_ONLY)
        nError = ERROR_ACCESS_DENIED;
    // Resizing the classic hash table is not supported on archives that use a HET table
    if (ha->pHetTable != NULL)
        nError = ERROR_ACCESS_DENIED;

    // New hash table size must be a nonzero power of two
    if (dwNewTableSize == 0 || (dwNewTableSize & (dwNewTableSize - 1)))
        nError = ERROR_INVALID_PARAMETER;

    // ALL file names must be known in order to be able
    // to rebuild the hash table
    if (nError == ERROR_SUCCESS)
        nError = CheckIfAllFilesKnown(ha, NULL, NULL);

    // Allocate buffer for the new hash table
    if (nError == ERROR_SUCCESS)
    {
        dwOldHashTableSize = ha->pHeader->dwHashTableSize;
        pOldHashTable = ha->pHashTable;

        pNewHashTable = ALLOCMEM(TMPQHash, dwNewTableSize);
        if (pOldHashTable == NULL || pNewHashTable == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Now build the new hash table. Note that the block table references and the
    // tables stored in the (attributes) file have to be rebuilt as well.
    if (nError == ERROR_SUCCESS)
    {
        // Set new hash table
        memset(pNewHashTable, 0xFF, dwNewTableSize * sizeof(TMPQHash));
        ha->pHashTable = pNewHashTable;
        ha->pHeader->dwHashTableSize = dwNewTableSize;

        // Make new hash table entry for each file
        for(pFileEntry = ha->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
        {
            if (pFileEntry->dwFlags & MPQ_FILE_EXISTS)
            {
                // The file name must be known
                assert(pFileEntry->szFileName != NULL);

                // Create new hashtable entry
                pHash = AllocateHashEntry(ha, pFileEntry->szFileName, pFileEntry->lcLocale);
                if (pHash == NULL)
                {
                    nError = ERROR_CAN_NOT_COMPLETE;
                    break;
                }

                // Fill the hash table entry
                pHash->wPlatform = pFileEntry->wPlatform;
                pHash->dwBlockIndex = (DWORD)(pFileEntry - ha->pFileTable);

                // Fix the hash table index
                pFileEntry->dwHashIndex = (DWORD)(pHash - pNewHashTable);
            }
        }

        // On success, free the old hash table; on failure, revert to it
        if (nError == ERROR_SUCCESS)
        {
            pTableToFree = pOldHashTable;
            ha->dwFlags |= MPQ_FLAG_CHANGED;
        }
        else
        {
            ha->pHeader->dwHashTableSize = dwOldHashTableSize;
            ha->pHashTable = pOldHashTable;
            pTableToFree = pNewHashTable;
        }
    }

    // Free buffers
    if (pTableToFree != NULL)
        FREEMEM(pTableToFree);

    // Return the result
    if (nError != ERROR_SUCCESS)
        SetLastError(nError);
    return (nError == ERROR_SUCCESS);
}
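
A minimal usage sketch for the function above, assuming an older StormLib build that still exports SFileSetHashTableSize and an archive carrying a complete (listfile) so that CheckIfAllFilesKnown can succeed; the archive name and the new table size are placeholders:

// Usage sketch (illustrative only): grow the classic hash table of a writable MPQ.
#include "StormLib.h"
#include <stdio.h>

int main(void)
{
    HANDLE hMpq = NULL;

    // Open the archive for read/write access; on Unicode Windows builds the
    // path argument is a TCHAR string rather than a plain char string.
    if (!SFileOpenArchive("example.mpq", 0, 0, &hMpq))
    {
        printf("Open failed: %u\n", (unsigned)GetLastError());
        return 1;
    }

    // The new size must be a power of two and large enough for all existing files
    if (!SFileSetHashTableSize(hMpq, 0x1000))
        printf("SFileSetHashTableSize failed: %u\n", (unsigned)GetLastError());

    // Closing the archive writes the rebuilt tables back to disk
    SFileCloseArchive(hMpq);
    return 0;
}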
Example #2
bool WINAPI SFileSetMaxFileCount(HANDLE hMpq, DWORD dwMaxFileCount)
{
    TMPQHetTable * pOldHetTable = NULL;
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    TFileEntry * pOldFileTableEnd = ha->pFileTable + ha->dwFileTableSize;
    TFileEntry * pOldFileTable = NULL;
    TFileEntry * pOldFileEntry;
    TFileEntry * pFileEntry;
    TMPQHash * pOldHashTable = NULL;
    DWORD dwOldHashTableSize = 0;
    DWORD dwOldFileTableSize = 0;
    int nError = ERROR_SUCCESS;

    // Validate the parameters
    if(!IsValidMpqHandle(ha))
        nError = ERROR_INVALID_HANDLE;
    if(ha->dwFlags & MPQ_FLAG_READ_ONLY)
        nError = ERROR_ACCESS_DENIED;

    // The new limit must not be lower than the current size of the file table
    if(nError == ERROR_SUCCESS && ha->dwFileTableSize > dwMaxFileCount)
        nError = ERROR_DISK_FULL;

    // ALL file names must be known in order to be able
    // to rebuild the hash table
    if(nError == ERROR_SUCCESS)
    {
        nError = CheckIfAllFilesKnown(ha, NULL, NULL);
    }

    // If the MPQ has a hash table, then we reallocate the hash table
    if(nError == ERROR_SUCCESS && ha->pHashTable != NULL)
    {
        // Save parameters for the current hash table
        dwOldHashTableSize = ha->pHeader->dwHashTableSize;
        pOldHashTable = ha->pHashTable;

        // Allocate new hash table
        ha->pHeader->dwHashTableSize = GetHashTableSizeForFileCount(dwMaxFileCount);
        ha->pHashTable = STORM_ALLOC(TMPQHash, ha->pHeader->dwHashTableSize);
        if(ha->pHashTable != NULL)
            memset(ha->pHashTable, 0xFF, ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // If the MPQ has a HET table, allocate a new one as well
    if(nError == ERROR_SUCCESS && ha->pHetTable != NULL)
    {
        // Save the original HET table
        pOldHetTable = ha->pHetTable;

        // Create new one
        ha->pHetTable = CreateHetTable(dwMaxFileCount, 0x40, true);
        if(ha->pHetTable == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Now reallocate the file table
    if(nError == ERROR_SUCCESS)
    {
        // Save the current file table
        dwOldFileTableSize = ha->dwFileTableSize;
        pOldFileTable = ha->pFileTable;

        // Create new one
        ha->pFileTable = STORM_ALLOC(TFileEntry, dwMaxFileCount);
        if(ha->pFileTable != NULL)
            memset(ha->pFileTable, 0, dwMaxFileCount * sizeof(TFileEntry));
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Now we have to rebuild both the classic hash table and the HET table.
    if(nError == ERROR_SUCCESS)
    {
        DWORD dwFileIndex = 0;
        DWORD dwHashIndex = 0;

        // Create new hash and HET entry for each file
        pFileEntry = ha->pFileTable;
        for(pOldFileEntry = pOldFileTable; pOldFileEntry < pOldFileTableEnd; pOldFileEntry++)
        {
            if(pOldFileEntry->dwFlags & MPQ_FILE_EXISTS)
            {
                // Copy the old file entry to the new one
                memcpy(pFileEntry, pOldFileEntry, sizeof(TFileEntry));
                assert(pFileEntry->szFileName != NULL);
                
                // Create new entry in the hash table
                if(ha->pHashTable != NULL)
                {
                    dwHashIndex = AllocateHashEntry(ha, pFileEntry);
                    if(dwHashIndex == HASH_ENTRY_FREE)
                    {
                        nError = ERROR_CAN_NOT_COMPLETE;
                        break;
                    }
                }

                // Create new entry in the HET table, if needed
                if(ha->pHetTable != NULL)
                {
                    dwHashIndex = AllocateHetEntry(ha, pFileEntry);
                    if(dwHashIndex == HASH_ENTRY_FREE)
                    {
                        nError = ERROR_CAN_NOT_COMPLETE;
                        break;
                    }
                }

                // Move to the next file entry in the new table
                pFileEntry++;
                dwFileIndex++;
            }
        }
    }

    // Mark the archive as changed
    // Note: We always have to rebuild the (attributes) file due to the file table change
    if(nError == ERROR_SUCCESS)
    {
        ha->dwMaxFileCount = dwMaxFileCount;
        InvalidateInternalFiles(ha);
    }
    else
    {
        // Revert the hash table
        if(ha->pHashTable != NULL && pOldHashTable != NULL)
        {
            STORM_FREE(ha->pHashTable);
            ha->pHeader->dwHashTableSize = dwOldHashTableSize;
            ha->pHashTable = pOldHashTable;
        }

        // Revert the HET table
        if(ha->pHetTable != NULL && pOldHetTable != NULL)
        {
            FreeHetTable(ha->pHetTable);
            ha->pHetTable = pOldHetTable;
        }

        // Revert the file table
        if(pOldFileTable != NULL)
        {
            STORM_FREE(ha->pFileTable);
            ha->pFileTable = pOldFileTable;
        }

        SetLastError(nError);
    }

    // Return the result
    return (nError == ERROR_SUCCESS);
}
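
A similar usage sketch for SFileSetMaxFileCount, raising the file limit of a writable archive before adding more files; the archive name and the new limit are placeholders, and the archive is again assumed to have a complete (listfile):

// Usage sketch (illustrative only): raise the file limit of a writable MPQ
// so that additional files can be added afterwards.
#include "StormLib.h"
#include <stdio.h>

int main(void)
{
    HANDLE hMpq = NULL;

    // Open the archive for read/write access (TCHAR string on Unicode builds)
    if (!SFileOpenArchive("example.mpq", 0, 0, &hMpq))
    {
        printf("Open failed: %u\n", (unsigned)GetLastError());
        return 1;
    }

    // The new limit must not be lower than the current number of file entries;
    // this rebuilds the hash table, the HET table (if present) and the file table.
    if (!SFileSetMaxFileCount(hMpq, 0x2000))
        printf("SFileSetMaxFileCount failed: %u\n", (unsigned)GetLastError());

    // The rebuilt tables and internal files are written back when the archive is closed
    SFileCloseArchive(hMpq);
    return 0;
}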