bool WINAPI SFileFreeFileInfo(void * pvFileInfo, SFileInfoClass InfoClass)
{
    // Releases a table buffer previously handed out by SFileGetFileInfo.
    // Only the HET and BET tables have dedicated destructors; any other
    // info class is rejected as an invalid parameter.
    if(InfoClass == SFileMpqHetTable)
    {
        FreeHetTable((TMPQHetTable *)pvFileInfo);
        return true;
    }

    if(InfoClass == SFileMpqBetTable)
    {
        FreeBetTable((TMPQBetTable *)pvFileInfo);
        return true;
    }

    // Not a freeable info class
    SetLastError(ERROR_INVALID_PARAMETER);
    return false;
}
int EXPORT_SYMBOL SFileFreeFileInfo(void * pvFileInfo, SFileInfoClass InfoClass)
{
    // Exported variant returning int (1 = success, 0 = failure) instead of bool.
    // Frees a table buffer previously returned by SFileGetFileInfo; only the
    // HET and BET table classes are supported.
    if(InfoClass == SFileMpqHetTable)
    {
        FreeHetTable((TMPQHetTable *)pvFileInfo);
        return 1;
    }

    if(InfoClass == SFileMpqBetTable)
    {
        FreeBetTable((TMPQBetTable *)pvFileInfo);
        return 1;
    }

    // Not a freeable info class
    SetLastError(ERROR_INVALID_PARAMETER);
    return 0;
}
bool WINAPI SFileSetMaxFileCount(HANDLE hMpq, DWORD dwMaxFileCount)
{
    TMPQHetTable * pOldHetTable = NULL;
    TMPQArchive * ha = (TMPQArchive *)hMpq;
    TFileEntry * pOldFileTableEnd = NULL;
    TFileEntry * pOldFileTable = NULL;
    TFileEntry * pOldFileEntry;
    TFileEntry * pFileEntry;
    TMPQHash * pOldHashTable = NULL;
    DWORD dwOldHashTableSize = 0;
    int nError = ERROR_SUCCESS;

    // Test the valid parameters.
    // BUGFIX: the handle must be validated BEFORE any member of 'ha' is
    // dereferenced. The original code computed (ha->pFileTable +
    // ha->dwFileTableSize) and read ha->dwFlags unconditionally, which is
    // undefined behavior for a NULL/invalid handle.
    if(!IsValidMpqHandle(ha))
        nError = ERROR_INVALID_HANDLE;
    if(nError == ERROR_SUCCESS && (ha->dwFlags & MPQ_FLAG_READ_ONLY))
        nError = ERROR_ACCESS_DENIED;

    // The new limit must not be lower than the index of the last file entry in the table
    if(nError == ERROR_SUCCESS && ha->dwFileTableSize > dwMaxFileCount)
        nError = ERROR_DISK_FULL;

    // ALL file names must be known in order to be able
    // to rebuild hash table size
    if(nError == ERROR_SUCCESS)
    {
        nError = CheckIfAllFilesKnown(ha, NULL, NULL);
    }

    // Remember the end of the current file table (safe now: handle is valid)
    if(nError == ERROR_SUCCESS)
        pOldFileTableEnd = ha->pFileTable + ha->dwFileTableSize;

    // If the MPQ has a hash table, then we relocate the hash table
    if(nError == ERROR_SUCCESS && ha->pHashTable != NULL)
    {
        // Save parameters for the current hash table
        dwOldHashTableSize = ha->pHeader->dwHashTableSize;
        pOldHashTable = ha->pHashTable;

        // Allocate new hash table, sized for the new file count limit.
        // Hash entries are initialized to 0xFF (= free entry marker).
        ha->pHeader->dwHashTableSize = GetHashTableSizeForFileCount(dwMaxFileCount);
        ha->pHashTable = STORM_ALLOC(TMPQHash, ha->pHeader->dwHashTableSize);
        if(ha->pHashTable != NULL)
            memset(ha->pHashTable, 0xFF, ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // If the MPQ has HET table, allocate new one as well
    if(nError == ERROR_SUCCESS && ha->pHetTable != NULL)
    {
        // Save the original HET table
        pOldHetTable = ha->pHetTable;

        // Create new one
        ha->pHetTable = CreateHetTable(dwMaxFileCount, 0x40, true);
        if(ha->pHetTable == NULL)
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Now reallocate the file table
    if(nError == ERROR_SUCCESS)
    {
        // Save the current file table
        pOldFileTable = ha->pFileTable;

        // Create new one, zero-initialized
        ha->pFileTable = STORM_ALLOC(TFileEntry, dwMaxFileCount);
        if(ha->pFileTable != NULL)
            memset(ha->pFileTable, 0, dwMaxFileCount * sizeof(TFileEntry));
        else
            nError = ERROR_NOT_ENOUGH_MEMORY;
    }

    // Now we have to build both classic hash table and HET table.
    if(nError == ERROR_SUCCESS)
    {
        DWORD dwHashIndex = 0;

        // Create new hash and HET entry for each existing file
        pFileEntry = ha->pFileTable;
        for(pOldFileEntry = pOldFileTable; pOldFileEntry < pOldFileTableEnd; pOldFileEntry++)
        {
            if(pOldFileEntry->dwFlags & MPQ_FILE_EXISTS)
            {
                // Copy the old file entry to the new one
                memcpy(pFileEntry, pOldFileEntry, sizeof(TFileEntry));
                assert(pFileEntry->szFileName != NULL);

                // Create new entry in the hash table
                if(ha->pHashTable != NULL)
                {
                    dwHashIndex = AllocateHashEntry(ha, pFileEntry);
                    if(dwHashIndex == HASH_ENTRY_FREE)
                    {
                        nError = ERROR_CAN_NOT_COMPLETE;
                        break;
                    }
                }

                // Create new entry in the HET table, if needed
                if(ha->pHetTable != NULL)
                {
                    dwHashIndex = AllocateHetEntry(ha, pFileEntry);
                    if(dwHashIndex == HASH_ENTRY_FREE)
                    {
                        nError = ERROR_CAN_NOT_COMPLETE;
                        break;
                    }
                }

                // Move to the next file entry in the new table
                pFileEntry++;
            }
        }
    }

    // Mark the archive as changed
    // Note: We always have to rebuild the (attributes) file due to file table change
    if(nError == ERROR_SUCCESS)
    {
        ha->dwMaxFileCount = dwMaxFileCount;
        InvalidateInternalFiles(ha);
    }
    else
    {
        // Revert the hash table.
        // BUGFIX: restore the saved table even when the new allocation
        // FAILED (ha->pHashTable == NULL). The original only reverted when
        // the new pointer was non-NULL, leaking the old table and leaving
        // the archive without a hash table on allocation failure.
        if(pOldHashTable != NULL)
        {
            if(ha->pHashTable != NULL)
                STORM_FREE(ha->pHashTable);
            ha->pHeader->dwHashTableSize = dwOldHashTableSize;
            ha->pHashTable = pOldHashTable;
        }

        // Revert the HET table (same fix as above)
        if(pOldHetTable != NULL)
        {
            if(ha->pHetTable != NULL)
                FreeHetTable(ha->pHetTable);
            ha->pHetTable = pOldHetTable;
        }

        // Revert the file table (same fix as above)
        if(pOldFileTable != NULL)
        {
            if(ha->pFileTable != NULL)
                STORM_FREE(ha->pFileTable);
            ha->pFileTable = pOldFileTable;
        }

        SetLastError(nError);
    }

    // Return the result
    return (nError == ERROR_SUCCESS);
}