/* Delete every extents-overflow B-tree record describing rawFile's fork.
 *
 * Walks the fork's extent chain: the first 8 descriptors live inline in the
 * fork data; any further runs are records in the volume's extents overflow
 * tree, keyed by (forkType=0, fileID, startBlock).  Each overflow record
 * found is removed from the tree.
 *
 * Returns TRUE on success, FALSE (after hfs_panic) when the overflow tree is
 * not loaded or the extent chain is inconsistent.
 *
 * NOTE(review): this removes only the overflow *records*; the allocation
 * blocks themselves are presumably released elsewhere — confirm with callers.
 */
int removeExtents(RawFile* rawFile) {
    uint32_t blocksLeft;
    HFSPlusForkData* forkData;
    uint32_t currentBlock;
    uint32_t startBlock;
    uint32_t blockCount;
    HFSPlusExtentDescriptor* descriptor;
    int currentExtent;
    HFSPlusExtentKey extentKey;
    int exact;

    /* Per HFS+ B-tree convention, keyLength excludes the keyLength field. */
    extentKey.keyLength = sizeof(HFSPlusExtentKey) - sizeof(extentKey.keyLength);
    extentKey.forkType = 0; /* 0 = data fork */
    extentKey.fileID = rawFile->id;

    forkData = rawFile->forkData;
    blocksLeft = forkData->totalBlocks;
    currentExtent = 0;
    currentBlock = 0;
    descriptor = (HFSPlusExtentDescriptor*) forkData->extents;

    while(blocksLeft > 0) {
        if(currentExtent == 8) {
            /* Exhausted the current 8-descriptor record: look up the next
             * overflow record (keyed by the next allocation block) and
             * delete it from the tree. */
            if(rawFile->volume->extentsTree == NULL) {
                hfs_panic("no extents overflow file loaded yet!");
                return FALSE;
            }

            /* Free the previous overflow record, but never the inline
             * descriptors owned by forkData. */
            if(descriptor != ((HFSPlusExtentDescriptor*) forkData->extents)) {
                free(descriptor);
            }

            extentKey.startBlock = currentBlock;
            /* search() returns a malloc'd copy of the record payload. */
            descriptor = (HFSPlusExtentDescriptor*) search(rawFile->volume->extentsTree, (BTKey*)(&extentKey), &exact, NULL, NULL);
            if(descriptor == NULL || exact == FALSE) {
                hfs_panic("inconsistent extents information!");
                return FALSE;
            } else {
                removeFromBTree(rawFile->volume->extentsTree, (BTKey*)(&extentKey));
                currentExtent = 0;
                continue; /* re-enter loop on the fresh record */
            }
        }

        /* NOTE(review): startBlock is read but never used below. */
        startBlock = descriptor[currentExtent].startBlock;
        blockCount = descriptor[currentExtent].blockCount;

        currentBlock += blockCount;
        blocksLeft -= blockCount;
        currentExtent++;
    }

    /* Free the final overflow record, if any (never the inline array). */
    if(descriptor != ((HFSPlusExtentDescriptor*) forkData->extents)) {
        free(descriptor);
    }

    return TRUE;
}
/* Read the entire data fork of `file` into a newly allocated buffer.
 *
 * On success, *buffer points to malloc'd memory owned by the caller and the
 * fork's logical size is returned.  Returns 0 on failure (open error,
 * allocation error); *buffer is not valid in that case.  A short/failed READ
 * panics but still returns the logical size, matching the original behavior.
 */
uint32_t readHFSFile(HFSPlusCatalogFile* file, uint8_t** buffer, Volume* volume) {
    io_func* io;
    size_t bytesLeft;

    io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
    if(io == NULL) {
        hfs_panic("error opening file");
        /* BUGFIX: previously did free(buffer) here — `buffer` is the caller's
         * out-parameter (usually the address of a stack variable), so freeing
         * it was undefined behavior. Nothing has been allocated yet. */
        return 0;
    }

    bytesLeft = file->dataFork.logicalSize;
    *buffer = malloc(bytesLeft);
    if(!(*buffer)) {
        hfs_panic("error allocating memory");
        CLOSE(io); /* BUGFIX: don't leak the open file handle */
        return 0;
    }

    if(!READ(io, 0, bytesLeft, *buffer)) {
        hfs_panic("error reading");
    }

    CLOSE(io);

    return file->dataFork.logicalSize;
}
/* io_func read callback for decmpfs-compressed files.
 *
 * Serves reads from a one-chunk decompression cache
 * ([cachedStart, cachedEnd)); on a miss, loads the 64 KiB (0x10000) chunk
 * containing `location` from the resource fork, decompresses it into the
 * cache, and loops until `size` bytes have been copied into `buffer`.
 * Always returns TRUE; fatal I/O or zlib errors go through hfs_panic.
 *
 * NOTE(review): the malloc() calls here are unchecked — on OOM this would
 * dereference NULL before hfs_panic could fire.
 */
static int compressedRead(io_func* io, off_t location, size_t size, void *buffer) {
    HFSPlusCompressed* data = (HFSPlusCompressed*) io->data;
    size_t toRead;

    while(size > 0) {
        /* Serve as much as possible from the cached chunk. */
        if(data->cached && location >= data->cachedStart && location < data->cachedEnd) {
            if((data->cachedEnd - location) < size)
                toRead = data->cachedEnd - location;
            else
                toRead = size;

            memcpy(buffer, data->cached + (location - data->cachedStart), toRead);
            size -= toRead;
            location += toRead;
            buffer = ((uint8_t*) buffer) + toRead;
        }

        if(size == 0)
            break;

        // Try to cache
        uLongf actualSize;
        uint32_t block = location / 0x10000; /* chunk index: 64 KiB per chunk */
        uint8_t* compressed = (uint8_t*) malloc(data->blocks->blocks[block].size);
        /* Chunk data starts after the resource header plus the leading
         * uint32_t dataSize field of the block table. */
        if(!READ(data->io, data->rsrcHead.headerSize + sizeof(uint32_t) + data->blocks->blocks[block].offset, data->blocks->blocks[block].size, compressed)) {
            hfs_panic("error reading");
        }

        if(data->cached)
            free(data->cached);

        if (compressed[0] == 0xFF) {
            /* 0xFF marker: chunk is stored raw (not zlib); skip the marker
             * byte and copy the remaining bytes verbatim. */
            actualSize = data->blocks->blocks[block].size - 1;
            data->cached = (uint8_t*) malloc(actualSize);
            memcpy(data->cached, compressed + 1, actualSize);
        } else {
            data->cached = (uint8_t*) malloc(0x10000);
            actualSize = 0x10000; /* in/out: capacity in, bytes written out */
            int rv = uncompress(data->cached, &actualSize, compressed, data->blocks->blocks[block].size);
            if (rv) {
                hfs_panic("error decompressing");
            }
        }
        data->cachedStart = block * 0x10000;
        data->cachedEnd = data->cachedStart + actualSize;

        free(compressed);
    }

    return TRUE;
}
/* Overwrite the data fork of `file` with `bytesLeft` bytes taken from
 * `buffer`.  The fork is resized to exactly the requested length before the
 * write.  Errors are reported via hfs_panic; on open failure the function
 * simply returns. */
void writeToHFSFile(HFSPlusCatalogFile* file, uint8_t* buffer, size_t bytesLeft, Volume* volume) {
    io_func* stream = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);

    if(stream == NULL) {
        hfs_panic("error opening file");
        return;
    }

    /* Size the fork first so the single WRITE below fits exactly. */
    allocate((RawFile*)stream->data, bytesLeft);

    if(!WRITE(stream, 0, (size_t)bytesLeft, buffer)) {
        hfs_panic("error writing");
    }

    CLOSE(stream);
}
/* Copy the full contents of `input` into the HFS+ file `file`.
 *
 * If the catalog record carries UF_COMPRESSED, the data is routed through the
 * decmpfs layer (openHFSPlusCompressed), which handles its own resource-fork
 * allocation on close; otherwise the data fork is opened raw and pre-sized to
 * the input length.  Data is streamed in BUFSIZE chunks.  Errors are reported
 * via hfs_panic.
 */
void writeToHFSFile(HFSPlusCatalogFile* file, AbstractFile* input, Volume* volume) {
    unsigned char *buffer;
    io_func* io;
    off_t curPosition;
    off_t bytesLeft;

    buffer = (unsigned char*) malloc(BUFSIZE);
    if(!buffer) {
        /* BUGFIX: malloc result was previously used unchecked. */
        hfs_panic("error allocating memory");
        return;
    }

    bytesLeft = input->getLength(input);

    if(file->permissions.ownerFlags & UF_COMPRESSED) {
        io = openHFSPlusCompressed(volume, file);
        if(io == NULL) {
            hfs_panic("error opening file");
            free(buffer);
            return;
        }
    } else {
        io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
        if(io == NULL) {
            hfs_panic("error opening file");
            free(buffer);
            return;
        }
        /* Pre-size the raw data fork to the full input length. */
        allocate((RawFile*)io->data, bytesLeft);
    }

    curPosition = 0;
    while(bytesLeft > 0) {
        /* Stream at most BUFSIZE bytes per round trip. */
        size_t chunk = (bytesLeft > BUFSIZE) ? (size_t)BUFSIZE : (size_t)bytesLeft;

        if(input->read(input, buffer, chunk) != chunk) {
            hfs_panic("error reading");
        }
        if(!WRITE(io, curPosition, chunk, buffer)) {
            /* BUGFIX: this panic previously said "error reading" even though
             * it is the WRITE that failed. */
            hfs_panic("error writing");
        }

        curPosition += chunk;
        bytesLeft -= chunk;
    }

    CLOSE(io);
    free(buffer);
}
/* Extract the contents of the HFS+ file `file` into `output`.
 *
 * Compressed files (UF_COMPRESSED) are read through the decmpfs layer and
 * their logical length taken from the decmpfs header; plain files are read
 * from the raw data fork using the fork's logicalSize.  Data is streamed in
 * BUFSIZE chunks.  Errors are reported via hfs_panic. */
void writeToFile(HFSPlusCatalogFile* file, AbstractFile* output, Volume* volume) {
    unsigned char* scratch = (unsigned char*) malloc(BUFSIZE);
    io_func* io;
    off_t position = 0;
    size_t remaining;

    if(file->permissions.ownerFlags & UF_COMPRESSED) {
        io = openHFSPlusCompressed(volume, file);
        if(io == NULL) {
            hfs_panic("error opening file");
            free(scratch);
            return;
        }
        remaining = ((HFSPlusCompressed*) io->data)->decmpfs->size;
    } else {
        io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
        if(io == NULL) {
            hfs_panic("error opening file");
            free(scratch);
            return;
        }
        remaining = file->dataFork.logicalSize;
    }

    while(remaining > 0) {
        size_t chunk = (remaining > BUFSIZE) ? (size_t)BUFSIZE : remaining;

        if(!READ(io, position, chunk, scratch)) {
            hfs_panic("error reading");
        }
        if(output->write(output, scratch, chunk) != chunk) {
            hfs_panic("error writing");
        }

        position += chunk;
        remaining -= chunk;
    }

    CLOSE(io);
    free(scratch);
}
/* Resolve a slash-separated `path` to its catalog record.
 *
 * Starting from the root folder (absolute path) or `parentID` (relative),
 * each path component is looked up in the catalog tree.  When `traverse` is
 * set, links are followed via getLinkTarget() for intermediate components and
 * — unless the caller wants the link itself (`returnLink`) — for the final
 * one too.  On success returns a malloc'd record owned by the caller;
 * optionally reports the matched key via `retKey` and a pointer into `path`
 * at the last component via `name`.  Returns NULL when a component is
 * missing.
 *
 * NOTE(review): if the loop never reaches a folder record (e.g. a path of
 * only slashes), `realParent` is used uninitialized in the retKey epilogue —
 * confirm callers never pass such paths.
 * NOTE(review): the `exact == FALSE` test after getLinkTarget() re-checks the
 * flag from the *previous* search; getLinkTarget does not set it here.
 */
HFSPlusCatalogRecord* getRecordFromPath3(const char* path, Volume* volume, char **name, HFSPlusCatalogKey* retKey, char traverse, char returnLink, HFSCatalogNodeID parentID) {
    HFSPlusCatalogKey key;
    HFSPlusCatalogRecord* record;
    char* origPath;
    char* myPath;
    char* word;
    char* pathLimit;
    uint32_t realParent;
    int exact;

    /* Special-case "" and "/": resolve the root folder via its thread
     * record (empty-name key at kHFSRootFolderID). */
    if(path[0] == '\0' || (path[0] == '/' && path[1] == '\0')) {
        if(name != NULL)
            *name = (char*)path;

        key.keyLength = sizeof(key.parentID) + sizeof(key.nodeName.length);
        key.parentID = kHFSRootFolderID;
        key.nodeName.length = 0;

        record = (HFSPlusCatalogRecord*) search(volume->catalogTree, (BTKey*)(&key), &exact, NULL, NULL);
        /* The thread record points back at the folder's real (parent, name);
         * look that up to get the folder record itself. */
        key.parentID = ((HFSPlusCatalogThread*)record)->parentID;
        key.nodeName = ((HFSPlusCatalogThread*)record)->nodeName;
        free(record);

        record = (HFSPlusCatalogRecord*) search(volume->catalogTree, (BTKey*)(&key), &exact, NULL, NULL);

        return record;
    }

    myPath = strdup(path); /* strtok mutates; work on a copy */
    origPath = myPath;
    record = NULL;

    if(path[0] == '/') {
        key.parentID = kHFSRootFolderID;
    } else {
        key.parentID = parentID;
    }

    pathLimit = myPath + strlen(myPath);

    /* Manual strtok walk: the update expression restarts strtok just past the
     * previous token so "a//b" style runs of slashes are tolerated; word
     * pointers are compared against pathLimit to detect the end. */
    for(word = (char*)strtok(myPath, "/"); word && (word < pathLimit);
        word = ((word + strlen(word) + 1) < pathLimit) ? (char*)strtok(word + strlen(word) + 1, "/") : NULL) {
        if(name != NULL)
            *name = (char*)(path + (word - origPath)); /* offset into the caller's path */

        if(record != NULL) {
            free(record);
            record = NULL;
        }

        if(word[0] == '\0') {
            continue; /* empty component (consecutive slashes) */
        }

        ASCIIToUnicode(word, &key.nodeName);
        key.keyLength = sizeof(key.parentID) + sizeof(key.nodeName.length) + (sizeof(uint16_t) * key.nodeName.length);
        record = (HFSPlusCatalogRecord*) search(volume->catalogTree, (BTKey*)(&key), &exact, NULL, NULL);

        if(record == NULL || exact == FALSE) {
            free(origPath);
            if(record != NULL) {
                free(record);
            }
            return NULL; /* component not found */
        }

        if(traverse) {
            /* Follow links for intermediate components, and for the final one
             * only when the caller asked for the target (returnLink). */
            if(((word + strlen(word) + 1) < pathLimit) || returnLink) {
                record = getLinkTarget(record, key.parentID, &key, volume);
                if(record == NULL || exact == FALSE) {
                    free(origPath);
                    return NULL;
                }
            }
        }

        if(record->recordType == kHFSPlusFileRecord) {
            if((word + strlen(word) + 1) >= pathLimit) {
                /* File at the final component: success. */
                free(origPath);
                if(retKey != NULL) {
                    memcpy(retKey, &key, sizeof(HFSPlusCatalogKey));
                }
                return record;
            } else {
                /* File in the middle of the path: cannot descend. */
                free(origPath);
                free(record);
                return NULL;
            }
        }

        if(record->recordType != kHFSPlusFolderRecord)
            hfs_panic("inconsistent catalog tree!");

        /* Descend: remember the folder's parent, then key on its ID. */
        realParent = key.parentID;
        key.parentID = ((HFSPlusCatalogFolder*)record)->folderID;
    }

    if(retKey != NULL) {
        memcpy(retKey, &key, sizeof(HFSPlusCatalogKey));
        retKey->parentID = realParent;
    }

    free(origPath);
    return record;
}
/* Open an io_func for a decmpfs-compressed HFS+ file.
 *
 * Reads the com.apple.decmpfs extended attribute; if absent, returns a
 * handle for a file that is not compressed yet (size 0, empty header).  For
 * flags == 0x3 the payload lives inline in the attribute and is decompressed
 * into the cache up front; otherwise the resource fork is opened and its
 * compressed-chunk block table is loaded (byte-flipped to host order) for
 * later on-demand decompression by compressedRead().
 *
 * Returns a malloc'd io_func owned by the caller (closed via CLOSE()).
 * NOTE(review): malloc/realloc results are unchecked, and the uncompress()
 * return code in the inline path is ignored (only a size mismatch is
 * reported) — OOM or corrupt data would misbehave before any panic.
 */
io_func* openHFSPlusCompressed(Volume* volume, HFSPlusCatalogFile* file) {
    io_func* io;
    HFSPlusCompressed* data;
    uLongf actualSize;

    io = (io_func*) malloc(sizeof(io_func));
    data = (HFSPlusCompressed*) malloc(sizeof(HFSPlusCompressed));
    data->volume = volume;
    data->file = file;

    io->data = data;
    io->read = &compressedRead;
    io->write = &compressedWrite;
    io->close = &closeHFSPlusCompressed;

    data->cached = NULL;
    data->cachedStart = 0;
    data->cachedEnd = 0;
    data->io = NULL;
    data->blocks = NULL;
    data->dirty = FALSE;

    data->decmpfsSize = getAttribute(volume, file->fileID, "com.apple.decmpfs", (uint8_t**)(&data->decmpfs));
    if(data->decmpfsSize == 0) {
        /* No attribute: hand back an empty header to be filled on write. */
        data->decmpfs = (HFSPlusDecmpfs*) malloc(0x1000);
        data->decmpfs->size = 0;
        return io; // previously not compressed file
    }

    flipHFSPlusDecmpfs(data->decmpfs);

    if(data->decmpfs->flags == 0x3) {
        /* Type 3: zlib data stored inline in the xattr; decompress it all
         * into the cache now. */
        data->cached = (uint8_t*) malloc(data->decmpfs->size);
        actualSize = data->decmpfs->size;
        uncompress(data->cached, &actualSize, data->decmpfs->data, data->decmpfsSize - sizeof(HFSPlusDecmpfs));
        if(actualSize != data->decmpfs->size) {
            fprintf(stderr, "decmpfs: size mismatch\n");
        }
        data->cachedStart = 0;
        data->cachedEnd = actualSize;
    } else {
        /* Chunked data in the resource fork: read the resource header and
         * the block table describing each compressed chunk. */
        data->io = openRawFile(file->fileID, &file->resourceFork, (HFSPlusCatalogRecord*)file, volume);
        if(!data->io) {
            hfs_panic("error opening resource fork");
        }

        if(!READ(data->io, 0, sizeof(HFSPlusCmpfRsrcHead), &data->rsrcHead)) {
            hfs_panic("error reading");
        }
        flipRsrcHead(&data->rsrcHead);

        /* Read the fixed head first to learn numBlocks, then grow the
         * buffer to hold the per-block entries. */
        data->blocks = (HFSPlusCmpfRsrcBlockHead*) malloc(sizeof(HFSPlusCmpfRsrcBlockHead));
        if(!READ(data->io, data->rsrcHead.headerSize, sizeof(HFSPlusCmpfRsrcBlockHead), data->blocks)) {
            hfs_panic("error reading");
        }
        flipRsrcBlockHead(data->blocks);

        data->blocks = (HFSPlusCmpfRsrcBlockHead*) realloc(data->blocks, sizeof(HFSPlusCmpfRsrcBlockHead) + (sizeof(HFSPlusCmpfRsrcBlock) * data->blocks->numBlocks));
        if(!READ(data->io, data->rsrcHead.headerSize + sizeof(HFSPlusCmpfRsrcBlockHead), sizeof(HFSPlusCmpfRsrcBlock) * data->blocks->numBlocks, data->blocks->blocks)) {
            hfs_panic("error reading");
        }

        int i;
        for(i = 0; i < data->blocks->numBlocks; i++) {
            flipRsrcBlock(&data->blocks->blocks[i]);
        }
    }

    return io;
}
/* io_func close callback for decmpfs-compressed files.
 *
 * If the file was written (dirty), recompresses the cached plaintext in
 * 64 KiB chunks.  When the whole file fits in a single chunk small enough
 * for an inline extended attribute, it is stored as flags==0x3 inline data;
 * otherwise the chunks, the resource header, the block table, and a fixed
 * trailer are written to the resource fork (flags==0x4).  Finally the
 * com.apple.decmpfs attribute is written (byte-flipped around the call) and
 * all owned memory, including `io` itself, is freed.
 */
static void closeHFSPlusCompressed(io_func* io) {
    HFSPlusCompressed* data = (HFSPlusCompressed*) io->data;

    if(data->io)
        CLOSE(data->io);

    if(data->dirty) {
        int oldSize = data->decmpfsSize; /* attribute size before rewrite */

        /* The old (read-side) block table is rebuilt from scratch below. */
        if(data->blocks)
            free(data->blocks);

        data->decmpfs->magic = CMPFS_MAGIC;
        data->decmpfs->flags = 0x4; /* assume resource-fork storage for now */
        data->decmpfsSize = sizeof(HFSPlusDecmpfs);

        /* One chunk per 64 KiB of plaintext, rounding up. */
        uint32_t numBlocks = (data->decmpfs->size + 0xFFFF) / 0x10000;
        uint32_t blocksSize = sizeof(HFSPlusCmpfRsrcBlockHead) + (numBlocks * sizeof(HFSPlusCmpfRsrcBlock));
        data->blocks = (HFSPlusCmpfRsrcBlockHead*) malloc(sizeof(HFSPlusCmpfRsrcBlockHead) + (numBlocks * sizeof(HFSPlusCmpfRsrcBlock)));
        data->blocks->numBlocks = numBlocks;
        data->blocks->dataSize = blocksSize - sizeof(uint32_t); // without the front dataSize in BlockHead.

        data->rsrcHead.headerSize = 0x100;
        data->rsrcHead.dataSize = blocksSize;
        data->rsrcHead.totalSize = data->rsrcHead.headerSize + data->rsrcHead.dataSize;
        data->rsrcHead.flags = 0x32;

        /* Worst-case zlib output for one 64 KiB chunk (~size*1.1 + 12). */
        uint8_t* buffer = (uint8_t*) malloc((0x10000 * 1.1) + 12);
        uint32_t curFileOffset = data->blocks->dataSize;
        uint32_t i;
        for(i = 0; i < numBlocks; i++) {
            data->blocks->blocks[i].offset = curFileOffset;
            uLongf actualSize = (0x10000 * 1.1) + 12;
            /* Compress this chunk: a full 64 KiB, or the final remainder. */
            compress(buffer, &actualSize, data->cached + (0x10000 * i), (data->decmpfs->size - (0x10000 * i)) > 0x10000 ? 0x10000 : (data->decmpfs->size - (0x10000 * i)));
            data->blocks->blocks[i].size = actualSize;

            // check if we can fit the whole thing into an inline extended attribute
            // a little fudge factor here since sizeof(HFSPlusAttrKey) is bigger than it ought to be, since only 127 characters are strictly allowed
            if(numBlocks <= 1 && (actualSize + sizeof(HFSPlusDecmpfs) + sizeof(HFSPlusAttrKey)) <= 0x1000) {
                int newSize = (sizeof(HFSPlusDecmpfs) + actualSize + 1) & ~1; /* round up to even */
                if (oldSize < newSize) {
                    printf("growing ");
                    data->decmpfs = realloc(data->decmpfs, newSize);
                    memset(data->decmpfs->data + actualSize, 0, newSize - actualSize - sizeof(HFSPlusDecmpfs));
                }
                data->decmpfs->flags = 0x3; /* switch to inline-xattr storage */
                memcpy(data->decmpfs->data, buffer, actualSize);
                data->decmpfsSize = newSize;
                printf("inline data\n");
                break;
            } else {
                /* Resource-fork path: open the fork lazily on first chunk. */
                if(i == 0) {
                    data->io = openRawFile(data->file->fileID, &data->file->resourceFork, (HFSPlusCatalogRecord*)data->file, data->volume);
                    if(!data->io) {
                        hfs_panic("error opening resource fork");
                    }
                }

                /* Chunk data sits after the header plus the leading uint32_t
                 * dataSize field of the block table. */
                WRITE(data->io, data->rsrcHead.headerSize + sizeof(uint32_t) + data->blocks->blocks[i].offset, data->blocks->blocks[i].size, buffer);

                curFileOffset += data->blocks->blocks[i].size;
                data->blocks->dataSize += data->blocks->blocks[i].size;
                data->rsrcHead.dataSize += data->blocks->blocks[i].size;
                data->rsrcHead.totalSize += data->blocks->blocks[i].size;
            }
        }

        free(buffer);

        if(data->decmpfs->flags == 0x4) {
            /* Resource-fork layout was used: write the header, the block
             * table, and the trailer, byte-flipping around each WRITE so the
             * in-memory copies stay host-order. */
            flipRsrcHead(&data->rsrcHead);
            WRITE(data->io, 0, sizeof(HFSPlusCmpfRsrcHead), &data->rsrcHead);
            flipRsrcHead(&data->rsrcHead);

            for(i = 0; i < data->blocks->numBlocks; i++) {
                flipRsrcBlock(&data->blocks->blocks[i]);
            }
            flipRsrcBlockHead(data->blocks);
            WRITE(data->io, data->rsrcHead.headerSize, blocksSize, data->blocks);
            flipRsrcBlockHead(data->blocks);
            for(i = 0; i < data->blocks->numBlocks; i++) {
                flipRsrcBlock(&data->blocks->blocks[i]);
            }

            /* Fixed trailer with constants observed in real decmpfs forks;
             * field meanings are mostly unknown (hence unk1..unk4). */
            HFSPlusCmpfEnd end;
            memset(&end, 0, sizeof(HFSPlusCmpfEnd));
            end.unk1 = 0x1C;
            end.unk2 = 0x32;
            end.unk3 = 0x0;
            end.magic = CMPFS_MAGIC;
            end.flags = 0xA;
            end.size = 0xFFFF01;
            end.unk4 = 0x0;

            flipHFSPlusCmpfEnd(&end);
            WRITE(data->io, data->rsrcHead.totalSize, sizeof(HFSPlusCmpfEnd), &end);
            flipHFSPlusCmpfEnd(&end);

            CLOSE(data->io);
        }

        /* Persist the (possibly rewritten) decmpfs attribute in disk order. */
        flipHFSPlusDecmpfs(data->decmpfs);
        setAttribute(data->volume, data->file->fileID, "com.apple.decmpfs", (uint8_t*)(data->decmpfs), data->decmpfsSize);
        flipHFSPlusDecmpfs(data->decmpfs);
    }

    if(data->cached)
        free(data->cached);

    if(data->blocks)
        free(data->blocks);

    free(data->decmpfs);
    free(data);
    free(io);
}