uint32_t readHFSFile(HFSPlusCatalogFile* file, uint8_t** buffer, Volume* volume) {
    io_func* io;
    size_t bytesLeft;

    io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
    if(io == NULL) {
        hfs_panic("error opening file");
        return 0;
    }

    bytesLeft = file->dataFork.logicalSize;
    *buffer = malloc(bytesLeft);
    if(!(*buffer)) {
        hfs_panic("error allocating memory");
        CLOSE(io);
        return 0;
    }

    if(!READ(io, 0, bytesLeft, *buffer)) {
        hfs_panic("error reading");
    }

    CLOSE(io);

    return file->dataFork.logicalSize;
}
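/* A hedged usage sketch: read a file's contents into memory and print them.
 * Assumes getRecordFromPath from this codebase resolves a path to a catalog
 * record on an already-open Volume. */
void catFile(Volume* volume, const char* path) {
    HFSPlusCatalogRecord* record = getRecordFromPath(path, volume, NULL, NULL);
    if(record && record->recordType == kHFSPlusFileRecord) {
        uint8_t* contents;
        uint32_t size = readHFSFile((HFSPlusCatalogFile*)record, &contents, volume);
        if(size > 0) {
            fwrite(contents, 1, size, stdout);
            free(contents); /* readHFSFile allocates; the caller frees */
        }
    }
    if(record)
        free(record);
}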
int makeSymlink(const char* pathName, const char* target, Volume* volume) {
    io_func* io;
    HFSPlusCatalogFile* record;

    record = (HFSPlusCatalogFile*) getRecordFromPath3(pathName, volume, NULL, NULL, TRUE, FALSE, kHFSRootFolderID);

    if(!record) {
        /* no existing record: create a new file and mark it as a symlink */
        newFile(pathName, volume);
        record = (HFSPlusCatalogFile*) getRecordFromPath(pathName, volume, NULL, NULL);
        if(!record) {
            return FALSE;
        }
        record->permissions.fileMode |= S_IFLNK;
        record->userInfo.fileType = kSymLinkFileType;
        record->userInfo.fileCreator = kSymLinkCreator;
        updateCatalog(volume, (HFSPlusCatalogRecord*) record);
    } else {
        /* refuse to overwrite anything that is not already a symlink */
        if(record->recordType != kHFSPlusFileRecord || (record->permissions.fileMode & S_IFLNK) != S_IFLNK) {
            free(record);
            return FALSE;
        }
    }

    io = openRawFile(record->fileID, &record->dataFork, (HFSPlusCatalogRecord*) record, volume);
    if(io == NULL) {
        free(record);
        return FALSE;
    }

    /* the link target is stored as the file's data fork contents */
    WRITE(io, 0, strlen(target), (void*) target);
    CLOSE(io);

    free(record);

    return TRUE;
}
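/* Usage sketch, given an already-open Volume* volume (paths are illustrative): */
if(!makeSymlink("/private/etc/hosts", "/etc/hosts", volume)) {
    fprintf(stderr, "makeSymlink failed\n");
}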
HFSPlusCatalogRecord* getLinkTarget(HFSPlusCatalogRecord* record, HFSCatalogNodeID parentID, HFSPlusCatalogKey *key, Volume* volume) {
    io_func* io;
    char pathBuffer[1024];
    HFSPlusCatalogRecord* toReturn;
    HFSPlusCatalogKey nkey;
    int exact;

    if(record->recordType == kHFSPlusFileRecord && (((HFSPlusCatalogFile*)record)->permissions.fileMode & S_IFLNK) == S_IFLNK) {
        /* symlink: the target path is the contents of the data fork */
        io = openRawFile(((HFSPlusCatalogFile*)record)->fileID, &(((HFSPlusCatalogFile*)record)->dataFork), record, volume);
        READ(io, 0, (((HFSPlusCatalogFile*)record)->dataFork).logicalSize, pathBuffer);
        CLOSE(io);
        pathBuffer[(((HFSPlusCatalogFile*)record)->dataFork).logicalSize] = '\0';

        toReturn = getRecordFromPath3(pathBuffer, volume, NULL, key, TRUE, TRUE, parentID);
        free(record);
        return toReturn;
    } else if(record->recordType == kHFSPlusFileRecord && (((HFSPlusCatalogFile*)record)->userInfo.fileType) == kHardLinkFileType) {
        /* hard link: look up the iNodeXXX file in the volume's metadata directory */
        sprintf(pathBuffer, "iNode%d", ((HFSPlusCatalogFile*)record)->permissions.special.iNodeNum);
        nkey.parentID = volume->metadataDir;
        ASCIIToUnicode(pathBuffer, &nkey.nodeName);
        nkey.keyLength = sizeof(nkey.parentID) + sizeof(nkey.nodeName.length) + (sizeof(uint16_t) * nkey.nodeName.length);

        toReturn = (HFSPlusCatalogRecord*) search(volume->catalogTree, (BTKey*)(&nkey), &exact, NULL, NULL);
        free(record);
        return toReturn;
    } else {
        return record;
    }
}
void writeToHFSFile(HFSPlusCatalogFile* file, AbstractFile* input, Volume* volume) {
    unsigned char* buffer;
    io_func* io;
    off_t curPosition;
    off_t bytesLeft;

    buffer = (unsigned char*) malloc(BUFSIZE);
    bytesLeft = input->getLength(input);

    if(file->permissions.ownerFlags & UF_COMPRESSED) {
        /* transparently recompress files flagged as decmpfs-compressed */
        io = openHFSPlusCompressed(volume, file);
        if(io == NULL) {
            hfs_panic("error opening file");
            free(buffer);
            return;
        }
    } else {
        io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
        if(io == NULL) {
            hfs_panic("error opening file");
            free(buffer);
            return;
        }
        /* reserve the full size up front */
        allocate((RawFile*)io->data, bytesLeft);
    }

    curPosition = 0;

    while(bytesLeft > 0) {
        size_t toCopy = (bytesLeft > BUFSIZE) ? BUFSIZE : (size_t)bytesLeft;

        if(input->read(input, buffer, toCopy) != toCopy) {
            hfs_panic("error reading");
        }
        if(!WRITE(io, curPosition, toCopy, buffer)) {
            hfs_panic("error writing");
        }

        curPosition += toCopy;
        bytesLeft -= toCopy;
    }

    CLOSE(io);
    free(buffer);
}
void writeToFile(HFSPlusCatalogFile* file, AbstractFile* output, Volume* volume) {
    unsigned char* buffer;
    io_func* io;
    off_t curPosition;
    size_t bytesLeft;

    buffer = (unsigned char*) malloc(BUFSIZE);

    if(file->permissions.ownerFlags & UF_COMPRESSED) {
        io = openHFSPlusCompressed(volume, file);
        if(io == NULL) {
            hfs_panic("error opening file");
            free(buffer);
            return;
        }
        curPosition = 0;
        bytesLeft = ((HFSPlusCompressed*) io->data)->decmpfs->size; /* decompressed size */
    } else {
        io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
        if(io == NULL) {
            hfs_panic("error opening file");
            free(buffer);
            return;
        }
        curPosition = 0;
        bytesLeft = file->dataFork.logicalSize;
    }

    while(bytesLeft > 0) {
        size_t toCopy = (bytesLeft > BUFSIZE) ? BUFSIZE : bytesLeft;

        if(!READ(io, curPosition, toCopy, buffer)) {
            hfs_panic("error reading");
        }
        if(output->write(output, buffer, toCopy) != toCopy) {
            hfs_panic("error writing");
        }

        curPosition += toCopy;
        bytesLeft -= toCopy;
    }

    CLOSE(io);
    free(buffer);
}
void writeToHFSFile(HFSPlusCatalogFile* file, uint8_t* buffer, size_t bytesLeft, Volume* volume) {
    io_func* io;

    io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
    if(io == NULL) {
        hfs_panic("error opening file");
        return;
    }

    allocate((RawFile*)io->data, bytesLeft);

    if(!WRITE(io, 0, bytesLeft, buffer)) {
        hfs_panic("error writing");
    }

    CLOSE(io);
}
HFSPlusCatalogRecord* getLinkTarget(HFSPlusCatalogRecord* record, HFSCatalogNodeID parentID, HFSPlusCatalogKey *key, Volume* volume) {
    io_func* io;
    char pathBuffer[1024];
    HFSPlusCatalogRecord* toReturn;

    if(record->recordType == kHFSPlusFileRecord && (((HFSPlusCatalogFile*)record)->permissions.fileMode & S_IFLNK) == S_IFLNK) {
        /* symlink: the target path is the contents of the data fork */
        io = openRawFile(((HFSPlusCatalogFile*)record)->fileID, &(((HFSPlusCatalogFile*)record)->dataFork), record, volume);
        READ(io, 0, (((HFSPlusCatalogFile*)record)->dataFork).logicalSize, pathBuffer);
        CLOSE(io);
        pathBuffer[(((HFSPlusCatalogFile*)record)->dataFork).logicalSize] = '\0';

        toReturn = getRecordFromPath3(pathBuffer, volume, NULL, key, TRUE, TRUE, parentID);
        free(record);
        return toReturn;
    } else {
        return record;
    }
}
void writeToHFSFile(HFSPlusCatalogFile* file, AbstractFile* input, Volume* volume) {
    unsigned char buffer[BUFSIZE];
    io_func* io;
    off_t curPosition;
    off_t bytesLeft;

    bytesLeft = input->getLength(input);

    io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
    if(io == NULL) {
        panic("error opening file");
        return;
    }

    curPosition = 0;
    /* reserve the full size up front */
    allocate((RawFile*)io->data, bytesLeft);

    while(bytesLeft > 0) {
        size_t toCopy = (bytesLeft > BUFSIZE) ? BUFSIZE : (size_t)bytesLeft;

        if(input->read(input, buffer, toCopy) != toCopy) {
            panic("error reading");
        }
        if(!WRITE(io, curPosition, toCopy, buffer)) {
            panic("error writing");
        }

        curPosition += toCopy;
        bytesLeft -= toCopy;
    }

    CLOSE(io);
}
void writeToFile(HFSPlusCatalogFile* file, AbstractFile* output, Volume* volume) {
    unsigned char buffer[BUFSIZE];
    io_func* io;
    off_t curPosition;
    size_t bytesLeft;

    io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
    if(io == NULL) {
        panic("error opening file");
        return;
    }

    curPosition = 0;
    bytesLeft = file->dataFork.logicalSize;

    while(bytesLeft > 0) {
        size_t toCopy = (bytesLeft > BUFSIZE) ? BUFSIZE : bytesLeft;

        if(!READ(io, curPosition, toCopy, buffer)) {
            panic("error reading");
        }
        if(output->write(output, buffer, toCopy) != toCopy) {
            panic("error writing");
        }

        curPosition += toCopy;
        bytesLeft -= toCopy;
    }

    CLOSE(io);
}
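/* A hedged usage sketch: extract a file from the volume to a local file.
 * Assumes getRecordFromPath and createAbstractFileFromFile (the stdio FILE*
 * wrapper used elsewhere in this codebase); check your tree's headers for the
 * exact names. */
void extractFile(Volume* volume, const char* path, const char* outPath) {
    HFSPlusCatalogRecord* record = getRecordFromPath(path, volume, NULL, NULL);
    if(record && record->recordType == kHFSPlusFileRecord) {
        FILE* f = fopen(outPath, "wb");
        if(f) {
            AbstractFile* out = createAbstractFileFromFile(f);
            writeToFile((HFSPlusCatalogFile*)record, out, volume);
            out->close(out); /* assumed to close the underlying FILE* as well */
        }
    }
    if(record)
        free(record);
}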
int removeFile(const char* fileName, Volume* volume) {
    HFSPlusCatalogRecord* record;
    HFSPlusCatalogKey key;
    io_func* io;
    HFSPlusCatalogFolder* parentFolder = NULL;

    record = getRecordFromPath3(fileName, volume, NULL, &key, TRUE, FALSE, kHFSRootFolderID);
    if(record != NULL) {
        parentFolder = (HFSPlusCatalogFolder*) getRecordByCNID(key.parentID, volume);
        if(parentFolder != NULL) {
            if(parentFolder->recordType != kHFSPlusFolderRecord) {
                ASSERT(FALSE, "parent not folder");
                free(parentFolder);
                free(record);
                return FALSE;
            }
        } else {
            ASSERT(FALSE, "can't find parent");
            free(record);
            return FALSE;
        }

        if(record->recordType == kHFSPlusFileRecord) {
            XAttrList* next;
            XAttrList* attrs;

            /* truncate the fork to zero to release its allocation blocks */
            io = openRawFile(((HFSPlusCatalogFile*)record)->fileID, &((HFSPlusCatalogFile*)record)->dataFork, record, volume);
            allocate((RawFile*)io->data, 0);
            CLOSE(io);

            removeFromBTree(volume->catalogTree, (BTKey*)(&key));

            /* drop any extended attributes attached to the file */
            attrs = getAllExtendedAttributes(((HFSPlusCatalogFile*)record)->fileID, volume);
            while(attrs != NULL) {
                next = attrs->next;
                unsetAttribute(volume, ((HFSPlusCatalogFile*)record)->fileID, attrs->name);
                free(attrs->name);
                free(attrs);
                attrs = next;
            }

            /* remove the thread record (keyed by the file's own CNID with an empty name) */
            key.nodeName.length = 0;
            key.parentID = ((HFSPlusCatalogFile*)record)->fileID;
            key.keyLength = sizeof(key.parentID) + sizeof(key.nodeName.length);
            removeFromBTree(volume->catalogTree, (BTKey*)(&key));

            volume->volumeHeader->fileCount--;
        } else {
            if(((HFSPlusCatalogFolder*)record)->valence > 0) {
                free(record);
                free(parentFolder);
                ASSERT(FALSE, "folder not empty");
                return FALSE;
            } else {
                XAttrList* next;
                XAttrList* attrs;

                removeFromBTree(volume->catalogTree, (BTKey*)(&key));

                attrs = getAllExtendedAttributes(((HFSPlusCatalogFolder*)record)->folderID, volume);
                while(attrs != NULL) {
                    next = attrs->next;
                    unsetAttribute(volume, ((HFSPlusCatalogFolder*)record)->folderID, attrs->name);
                    free(attrs->name);
                    free(attrs);
                    attrs = next;
                }

                /* remove the folder's thread record as well */
                key.nodeName.length = 0;
                key.parentID = ((HFSPlusCatalogFolder*)record)->folderID;
                key.keyLength = sizeof(key.parentID) + sizeof(key.nodeName.length);
                removeFromBTree(volume->catalogTree, (BTKey*)(&key));
            }

            parentFolder->folderCount--;
            volume->volumeHeader->folderCount--;
        }

        parentFolder->valence--;
        updateCatalog(volume, (HFSPlusCatalogRecord*) parentFolder);
        updateVolume(volume);

        free(record);
        free(parentFolder);

        return TRUE;
    } else {
        if(parentFolder)
            free(parentFolder);
        ASSERT(FALSE, "cannot find record");
        return FALSE;
    }
}
Volume* openVolume(io_func* io) {
    Volume* volume;
    io_func* file;

    volume = (Volume*) malloc(sizeof(Volume));
    volume->image = io;
    volume->extentsTree = NULL;

    volume->volumeHeader = readVolumeHeader(io, 1024);
    if(volume->volumeHeader == NULL) {
        free(volume);
        return NULL;
    }

    file = openRawFile(kHFSExtentsFileID, &volume->volumeHeader->extentsFile, NULL, volume);
    if(file == NULL) {
        free(volume->volumeHeader);
        free(volume);
        return NULL;
    }

    volume->extentsTree = openExtentsTree(file);
    if(volume->extentsTree == NULL) {
        free(volume->volumeHeader);
        free(volume);
        return NULL;
    }

    file = openRawFile(kHFSCatalogFileID, &volume->volumeHeader->catalogFile, NULL, volume);
    if(file == NULL) {
        closeBTree(volume->extentsTree);
        free(volume->volumeHeader);
        free(volume);
        return NULL;
    }

    volume->catalogTree = openCatalogTree(file);
    if(volume->catalogTree == NULL) {
        closeBTree(volume->extentsTree);
        free(volume->volumeHeader);
        free(volume);
        return NULL;
    }

    volume->allocationFile = openRawFile(kHFSAllocationFileID, &volume->volumeHeader->allocationFile, NULL, volume);
    if(volume->allocationFile == NULL) {
        closeBTree(volume->catalogTree);
        closeBTree(volume->extentsTree);
        free(volume->volumeHeader);
        free(volume);
        return NULL;
    }

    /* the attributes tree is optional; not every volume has one */
    volume->attrTree = NULL;
    file = openRawFile(kHFSAttributesFileID, &volume->volumeHeader->attributesFile, NULL, volume);
    if(file != NULL) {
        volume->attrTree = openAttributesTree(file);
        if(!volume->attrTree) {
            CLOSE(file);
        }
    }

    volume->metadataDir = getMetadataDirectoryID(volume);

    return volume;
}
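/* A minimal open/teardown sketch, assuming openFlatFile (the flat-image
 * io_func constructor from this codebase) and closeVolume as the matching
 * teardown; adjust the names to your tree. */
io_func* image = openFlatFile("filesystem.img");
if(image != NULL) {
    Volume* volume = openVolume(image);
    if(volume != NULL) {
        /* ... read or modify files ... */
        closeVolume(volume);
    }
    CLOSE(image);
}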
io_func* openHFSPlusCompressed(Volume* volume, HFSPlusCatalogFile* file) {
    io_func* io;
    HFSPlusCompressed* data;
    uLongf actualSize;

    io = (io_func*) malloc(sizeof(io_func));
    data = (HFSPlusCompressed*) malloc(sizeof(HFSPlusCompressed));

    data->volume = volume;
    data->file = file;

    io->data = data;
    io->read = &compressedRead;
    io->write = &compressedWrite;
    io->close = &closeHFSPlusCompressed;

    data->cached = NULL;
    data->cachedStart = 0;
    data->cachedEnd = 0;
    data->io = NULL;
    data->blocks = NULL;
    data->dirty = FALSE;

    data->decmpfsSize = getAttribute(volume, file->fileID, "com.apple.decmpfs", (uint8_t**)(&data->decmpfs));
    if(data->decmpfsSize == 0) {
        /* file was not previously compressed; start with an empty decmpfs header */
        data->decmpfs = (HFSPlusDecmpfs*) malloc(0x1000);
        data->decmpfs->size = 0;
        return io;
    }

    flipHFSPlusDecmpfs(data->decmpfs);

    if(data->decmpfs->flags == 0x3) {
        /* type 3: payload is zlib-compressed inline in the xattr itself */
        data->cached = (uint8_t*) malloc(data->decmpfs->size);
        actualSize = data->decmpfs->size;
        uncompress(data->cached, &actualSize, data->decmpfs->data, data->decmpfsSize - sizeof(HFSPlusDecmpfs));
        if(actualSize != data->decmpfs->size) {
            fprintf(stderr, "decmpfs: size mismatch\n");
        }
        data->cachedStart = 0;
        data->cachedEnd = actualSize;
    } else {
        /* type 4: payload is chunked into blocks in the resource fork */
        data->io = openRawFile(file->fileID, &file->resourceFork, (HFSPlusCatalogRecord*)file, volume);
        if(!data->io) {
            hfs_panic("error opening resource fork");
        }

        if(!READ(data->io, 0, sizeof(HFSPlusCmpfRsrcHead), &data->rsrcHead)) {
            hfs_panic("error reading");
        }
        flipRsrcHead(&data->rsrcHead);

        data->blocks = (HFSPlusCmpfRsrcBlockHead*) malloc(sizeof(HFSPlusCmpfRsrcBlockHead));
        if(!READ(data->io, data->rsrcHead.headerSize, sizeof(HFSPlusCmpfRsrcBlockHead), data->blocks)) {
            hfs_panic("error reading");
        }
        flipRsrcBlockHead(data->blocks);

        data->blocks = (HFSPlusCmpfRsrcBlockHead*) realloc(data->blocks,
                sizeof(HFSPlusCmpfRsrcBlockHead) + (sizeof(HFSPlusCmpfRsrcBlock) * data->blocks->numBlocks));
        if(!READ(data->io, data->rsrcHead.headerSize + sizeof(HFSPlusCmpfRsrcBlockHead),
                sizeof(HFSPlusCmpfRsrcBlock) * data->blocks->numBlocks, data->blocks->blocks)) {
            hfs_panic("error reading");
        }

        int i;
        for(i = 0; i < data->blocks->numBlocks; i++) {
            flipRsrcBlock(&data->blocks->blocks[i]);
        }
    }

    return io;
}
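/* For orientation, the two decmpfs storage types handled above and in
 * closeHFSPlusCompressed below (the struct field is named "flags" here, but it
 * holds the decmpfs compression type). The enum names are illustrative; only
 * the values come from the code: */
enum {
    kDecmpfsZlibXattr = 0x3, /* zlib payload stored inline in the com.apple.decmpfs xattr */
    kDecmpfsZlibRsrc  = 0x4  /* zlib payload stored as chunks in the resource fork */
};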
static void closeHFSPlusCompressed(io_func* io) {
    HFSPlusCompressed* data = (HFSPlusCompressed*) io->data;

    if(data->io)
        CLOSE(data->io);

    if(data->dirty) {
        int oldSize = data->decmpfsSize;

        if(data->blocks)
            free(data->blocks);

        data->decmpfs->magic = CMPFS_MAGIC;
        data->decmpfs->flags = 0x4;
        data->decmpfsSize = sizeof(HFSPlusDecmpfs);

        uint32_t numBlocks = (data->decmpfs->size + 0xFFFF) / 0x10000;
        uint32_t blocksSize = sizeof(HFSPlusCmpfRsrcBlockHead) + (numBlocks * sizeof(HFSPlusCmpfRsrcBlock));
        data->blocks = (HFSPlusCmpfRsrcBlockHead*) malloc(sizeof(HFSPlusCmpfRsrcBlockHead) + (numBlocks * sizeof(HFSPlusCmpfRsrcBlock)));
        data->blocks->numBlocks = numBlocks;
        data->blocks->dataSize = blocksSize - sizeof(uint32_t); // without the leading dataSize field of the block head

        data->rsrcHead.headerSize = 0x100;
        data->rsrcHead.dataSize = blocksSize;
        data->rsrcHead.totalSize = data->rsrcHead.headerSize + data->rsrcHead.dataSize;
        data->rsrcHead.flags = 0x32;

        uint8_t* buffer = (uint8_t*) malloc((0x10000 * 1.1) + 12);
        uint32_t curFileOffset = data->blocks->dataSize;
        uint32_t i;
        for(i = 0; i < numBlocks; i++) {
            data->blocks->blocks[i].offset = curFileOffset;
            uLongf actualSize = (0x10000 * 1.1) + 12;
            compress(buffer, &actualSize, data->cached + (0x10000 * i),
                    (data->decmpfs->size - (0x10000 * i)) > 0x10000 ? 0x10000 : (data->decmpfs->size - (0x10000 * i)));
            data->blocks->blocks[i].size = actualSize;

            // Check whether we can fit the whole thing into an inline extended attribute.
            // A little fudge factor here, since sizeof(HFSPlusAttrKey) is bigger than it
            // ought to be: only 127 characters are strictly allowed in the name.
            if(numBlocks <= 1 && (actualSize + sizeof(HFSPlusDecmpfs) + sizeof(HFSPlusAttrKey)) <= 0x1000) {
                int newSize = (sizeof(HFSPlusDecmpfs) + actualSize + 1) & ~1;
                if(oldSize < newSize) {
                    printf("growing ");
                    data->decmpfs = realloc(data->decmpfs, newSize);
                    memset(data->decmpfs->data + actualSize, 0, newSize - actualSize - sizeof(HFSPlusDecmpfs));
                }
                data->decmpfs->flags = 0x3;
                memcpy(data->decmpfs->data, buffer, actualSize);
                data->decmpfsSize = newSize;
                printf("inline data\n");
                break;
            } else {
                if(i == 0) {
                    data->io = openRawFile(data->file->fileID, &data->file->resourceFork, (HFSPlusCatalogRecord*)data->file, data->volume);
                    if(!data->io) {
                        hfs_panic("error opening resource fork");
                    }
                }

                WRITE(data->io, data->rsrcHead.headerSize + sizeof(uint32_t) + data->blocks->blocks[i].offset,
                        data->blocks->blocks[i].size, buffer);

                curFileOffset += data->blocks->blocks[i].size;
                data->blocks->dataSize += data->blocks->blocks[i].size;
                data->rsrcHead.dataSize += data->blocks->blocks[i].size;
                data->rsrcHead.totalSize += data->blocks->blocks[i].size;
            }
        }
        free(buffer);

        if(data->decmpfs->flags == 0x4) {
            /* resource-fork layout: write the header, the block list, and the trailer */
            flipRsrcHead(&data->rsrcHead);
            WRITE(data->io, 0, sizeof(HFSPlusCmpfRsrcHead), &data->rsrcHead);
            flipRsrcHead(&data->rsrcHead);

            for(i = 0; i < data->blocks->numBlocks; i++) {
                flipRsrcBlock(&data->blocks->blocks[i]);
            }
            flipRsrcBlockHead(data->blocks);
            WRITE(data->io, data->rsrcHead.headerSize, blocksSize, data->blocks);
            flipRsrcBlockHead(data->blocks);
            for(i = 0; i < data->blocks->numBlocks; i++) {
                flipRsrcBlock(&data->blocks->blocks[i]);
            }

            HFSPlusCmpfEnd end;
            memset(&end, 0, sizeof(HFSPlusCmpfEnd));
            end.unk1 = 0x1C;
            end.unk2 = 0x32;
            end.unk3 = 0x0;
            end.magic = CMPFS_MAGIC;
            end.flags = 0xA;
            end.size = 0xFFFF01;
            end.unk4 = 0x0;

            flipHFSPlusCmpfEnd(&end);
            WRITE(data->io, data->rsrcHead.totalSize, sizeof(HFSPlusCmpfEnd), &end);
            flipHFSPlusCmpfEnd(&end);

            CLOSE(data->io);
        }

        flipHFSPlusDecmpfs(data->decmpfs);
        setAttribute(data->volume, data->file->fileID, "com.apple.decmpfs", (uint8_t*)(data->decmpfs), data->decmpfsSize);
        flipHFSPlusDecmpfs(data->decmpfs);
    }

    if(data->cached)
        free(data->cached);

    if(data->blocks)
        free(data->blocks);

    free(data->decmpfs);
    free(data);
    free(io);
}
/** Executes the algorithm. Reading in the file and creating and populating
 *  the output workspace
 *
 *  @throw Exception::FileError If the RAW file cannot be found/opened
 *  @throw std::invalid_argument If the optional properties are set to invalid values
 */
void LoadRawBin0::exec() {
  // Retrieve the filename from the properties
  m_filename = getPropertyValue("Filename");

  bool bLoadlogFiles = getProperty("LoadLogFiles");

  // Open the raw file
  FILE* file = openRawFile(m_filename);

  // Need to check that the file is not a text file, as the ISISRAW routines don't deal
  // with these very well, i.e. reading continues until a bad_alloc is encountered.
  if (isAscii(file)) {
    g_log.error() << "File \"" << m_filename << "\" is not a valid RAW file.\n";
    throw std::invalid_argument("Incorrect file type encountered.");
  }

  std::string title;
  readTitle(file, title);

  readworkspaceParameters(m_numberOfSpectra, m_numberOfPeriods, m_lengthIn, m_noTimeRegimes);

  /// setOptionalProperties();
  // Validate the optional parameters, if set
  checkOptionalProperties();

  // Calculate the size of a workspace, given its number of periods & spectra to read
  m_total_specs = calculateWorkspaceSize();

  // No real X values for bin 0, so initialize this to zero
  boost::shared_ptr<MantidVec> channelsVec(new MantidVec(1, 0));
  m_timeChannelsVec.push_back(channelsVec);

  double histTotal = static_cast<double>(m_total_specs * m_numberOfPeriods);
  int64_t histCurrent = -1;

  // Create the 2D workspace for the output; xlength and ylength are one
  DataObjects::Workspace2D_sptr localWorkspace = createWorkspace(m_total_specs, 1, 1, title);
  Run& run = localWorkspace->mutableRun();
  if (bLoadlogFiles) {
    runLoadLog(m_filename, localWorkspace, 0.0, 0.0);
    const int period_number = 1;
    createPeriodLogs(period_number, localWorkspace);
  }

  // Set the total proton charge for this run
  setProtonCharge(run);

  WorkspaceGroup_sptr ws_grp = createGroupWorkspace();
  setWorkspaceProperty("OutputWorkspace", title, ws_grp, localWorkspace, m_numberOfPeriods, false);

  // Loop over the number of periods in the raw file, putting each period in a separate workspace
  for (int period = 0; period < m_numberOfPeriods; ++period) {
    if (period > 0) {
      localWorkspace = createWorkspace(localWorkspace);
      if (bLoadlogFiles) {
        // Remove the previous period's data
        std::stringstream prevPeriod;
        prevPeriod << "PERIOD " << (period);
        Run& runObj = localWorkspace->mutableRun();
        runObj.removeLogData(prevPeriod.str());
        runObj.removeLogData("current_period");
        // Add the current period's data
        const int period_number = period + 1;
        createPeriodLogs(period_number, localWorkspace);
      }
    }
    skipData(file, period * (m_numberOfSpectra + 1));
    int64_t wsIndex = 0;
    for (specid_t i = 1; i <= m_numberOfSpectra; ++i) {
      int64_t histToRead = i + period * (m_numberOfSpectra + 1);
      if ((i >= m_spec_min && i < m_spec_max) ||
          (m_list && find(m_spec_list.begin(), m_spec_list.end(), i) != m_spec_list.end())) {
        progress(m_prog, "Reading raw file data...");
        // Read the spectrum
        if (!readData(file, histToRead)) {
          throw std::runtime_error("Error reading raw file");
        }
        int64_t binStart = 0;
        setWorkspaceData(localWorkspace, m_timeChannelsVec, wsIndex, i, m_noTimeRegimes, 1, binStart);
        ++wsIndex;

        if (m_numberOfPeriods == 1) {
          if (++histCurrent % 100 == 0) {
            m_prog = double(histCurrent) / histTotal;
          }
          interruption_point();
        }
      } else {
        skipData(file, histToRead);
      }
    }

    if (m_numberOfPeriods > 1) {
      setWorkspaceProperty(localWorkspace, ws_grp, period, false);
      // Progress for workspace groups
      m_prog = static_cast<double>(period) / static_cast<double>(m_numberOfPeriods - 1);
    }
  } // loop over periods

  // Clean up
  isisRaw.reset();
  fclose(file);
}
ExtentList* fs_get_extents(int device, int partition, const char* fileName) {
    unsigned int partitionStart;
    unsigned int physBlockSize;
    ExtentList* list = NULL;

    bdevfs_device_t* dev = bdevfs_open(device, partition);
    if(!dev)
        return NULL;

    physBlockSize = block_device_block_size(dev->handle->device);
    partitionStart = block_device_get_start(dev->handle);

    HFSPlusCatalogRecord* record = getRecordFromPath(fileName, dev->volume, NULL, NULL);
    if(record != NULL) {
        if(record->recordType == kHFSPlusFileRecord) {
            io_func* fileIO;
            HFSPlusCatalogFile* file = (HFSPlusCatalogFile*) record;
            unsigned int allocationBlockSize = dev->volume->volumeHeader->blockSize;
            int numExtents = 0;
            Extent* extent;
            int i;

            fileIO = openRawFile(file->fileID, &file->dataFork, record, dev->volume);
            if(!fileIO)
                goto out_free;

            /* count the extents, then translate each into physical block ranges */
            extent = ((RawFile*)fileIO->data)->extents;
            while(extent != NULL) {
                numExtents++;
                extent = extent->next;
            }

            list = (ExtentList*) malloc(sizeof(ExtentList));
            list->numExtents = numExtents;
            extent = ((RawFile*)fileIO->data)->extents;
            for(i = 0; i < list->numExtents; i++) {
                list->extents[i].startBlock = partitionStart + (extent->startBlock * (allocationBlockSize / physBlockSize));
                list->extents[i].blockCount = extent->blockCount * (allocationBlockSize / physBlockSize);
                extent = extent->next;
            }

            CLOSE(fileIO);
        } else {
            goto out_free;
        }
    } else {
        goto out_close;
    }

out_free:
    free(record);

out_close:
    bdevfs_close(dev);
    return list;
}