/*! \brief Allocates the given extent, if available.

	\param extent The extent (location in blocks, length in bytes) to claim.

	\return
	- B_OK: Success.
	- error code: Failure, the extent (or some portion of it) has already
	  been allocated.
*/
status_t
Allocator::GetExtent(Udf::extent_address extent)
{
	status_t error = InitCheck();
	if (!error) {
		uint32 offset = extent.location();
		uint32 length = BlocksFor(extent.length());
		// First see if the extent is past the allocation tail,
		// since we then don't have to do any chunklist traversal
		if (offset >= Length()) {
			// Add a new chunk to the end of the chunk list if
			// necessary to cover the gap between the old tail and
			// the requested offset
			if (offset > Length()) {
				Udf::extent_address chunk(Length(),
					(offset - Length()) << BlockShift());
				fChunkList.push_back(chunk);
			}
			// Adjust the tail
			fLength = offset + length;
			return B_OK;
		} else {
			// Block is not past tail, so check the chunk list
			for (list<Udf::extent_address>::iterator i = fChunkList.begin();
					i != fChunkList.end(); i++) {
				uint32 chunkOffset = i->location();
				uint32 chunkLength = BlocksFor(i->length());
				if (chunkOffset <= offset
					&& (offset + length) <= (chunkOffset + chunkLength)) {
					// Found it. Split the chunk. First look for an orphan
					// before the block, then after.
					if (chunkOffset < offset) {
						// Orphan before; add a new chunk in front
						// of the current one
						Udf::extent_address chunk(chunkOffset,
							(offset - chunkOffset) << BlockShift());
						fChunkList.insert(i, chunk);
					}
					if ((offset + length) < (chunkOffset + chunkLength)) {
						// Orphan after; resize the original chunk.
						// NOTE: the remaining block count must be shifted
						// by BlockShift() to yield bytes; the previous code
						// shifted by BlockSize(), corrupting the length.
						i->set_location(offset + length);
						i->set_length(((chunkOffset + chunkLength)
							- (offset + length)) << BlockShift());
					} else {
						// No orphan after; remove the original chunk
						fChunkList.erase(i);
					}
					return B_OK;
				}
			}
			// No matching chunk found, we're SOL.
			error = B_ERROR;
		}
	}
	return error;
}
bool disk_super_block::IsValid() const { if (Magic1() != (int32)SUPER_BLOCK_MAGIC1 || Magic2() != (int32)SUPER_BLOCK_MAGIC2 || Magic3() != (int32)SUPER_BLOCK_MAGIC3 || (int32)block_size != inode_size || ByteOrder() != SUPER_BLOCK_FS_LENDIAN || (1UL << BlockShift()) != BlockSize() || AllocationGroups() < 1 || AllocationGroupShift() < 1 || BlocksPerAllocationGroup() < 1 || NumBlocks() < 10 || AllocationGroups() != divide_roundup(NumBlocks(), 1L << AllocationGroupShift())) return false; return true; }
/*! \brief Returns the number of blocks needed to accommodate the given
	number of bytes.

	\param bytes Byte count to convert; must fit in a uint32 worth of
	       blocks, otherwise a warning is printed and 0 is returned.

	\return The (rounded-up) block count, or 0 on error.
*/
uint32
Allocator::BlocksFor(off_t bytes)
{
	if (BlockSize() == 0) {
		// bytes is a 64-bit off_t, so it takes the %Ld specifier
		// (the previous %ld mismatched the vararg width)
		DEBUG_INIT_ETC("Allocator", ("bytes: %Ld\n", bytes));
		PRINT(("WARNING: Allocator::BlockSize() == 0!\n"));
		return 0;
	} else {
		off_t blocks = bytes >> BlockShift();
		// round up when the size is not a whole number of blocks
		if (bytes % BlockSize() != 0)
			blocks++;
		// the result must fit in a uint32; check the upper 32 bits
		uint64 mask = 0xffffffff;
		mask <<= 32;
		if (blocks & mask) {
			// ToDo: Convert this to actually signal an error
			DEBUG_INIT_ETC("Allocator", ("bytes: %Ld\n", bytes));
			PRINT(("WARNING: bytes argument too large for corresponding number "
				"of blocks to be specified with a uint32! (bytes: %Ld, blocks: %Ld, "
				"maxblocks: %ld).\n", bytes, blocks, ULONG_MAX));
			blocks = 0;
		}
		return blocks;
	}
}
/*! \brief Mounts the ext2 volume found on \a deviceName.

	Reads and validates the superblock, sets up the block cache,
	initializes (and possibly recovers) the journal, and publishes
	the root vnode.

	\param deviceName Path of the device to mount.
	\param flags Mount flags (B_MOUNT_READ_ONLY honored).

	\return B_OK on success, an error code otherwise.
*/
status_t
Volume::Mount(const char* deviceName, uint32 flags)
{
	// flags |= B_MOUNT_READ_ONLY;
		// we only support read-only for now

	if ((flags & B_MOUNT_READ_ONLY) != 0) {
		TRACE("Volume::Mount(): Read only\n");
	} else {
		TRACE("Volume::Mount(): Read write\n");
	}

	DeviceOpener opener(deviceName, (flags & B_MOUNT_READ_ONLY) != 0
		? O_RDONLY : O_RDWR);
	fDevice = opener.Device();
	if (fDevice < B_OK) {
		FATAL("Volume::Mount(): couldn't open device\n");
		return fDevice;
	}

	if (opener.IsReadOnly())
		fFlags |= VOLUME_READ_ONLY;

	// read the super block
	status_t status = Identify(fDevice, &fSuperBlock);
	if (status != B_OK) {
		FATAL("Volume::Mount(): Identify() failed\n");
		return status;
	}

	// Trace the feature flags only after Identify() has actually filled
	// in fSuperBlock; doing it earlier printed uninitialized data.
	TRACE("features %lx, incompatible features %lx, read-only features %lx\n",
		fSuperBlock.CompatibleFeatures(), fSuperBlock.IncompatibleFeatures(),
		fSuperBlock.ReadOnlyFeatures());

	// check read-only features if mounting read-write
	if (!IsReadOnly() && _UnsupportedReadOnlyFeatures(fSuperBlock) != 0)
		return B_UNSUPPORTED;

	// initialize short hands to the super block (to save byte swapping)
	fBlockShift = fSuperBlock.BlockShift();
	if (fBlockShift < 10 || fBlockShift > 16)
		return B_ERROR;
	fBlockSize = 1UL << fBlockShift;
	fFirstDataBlock = fSuperBlock.FirstDataBlock();

	fFreeBlocks = fSuperBlock.FreeBlocks(Has64bitFeature());
	fFreeInodes = fSuperBlock.FreeInodes();

	off_t numBlocks = fSuperBlock.NumBlocks(Has64bitFeature())
		- fFirstDataBlock;
	uint32 blocksPerGroup = fSuperBlock.BlocksPerGroup();
	// guard against a corrupted superblock that would otherwise cause a
	// division by zero below
	if (blocksPerGroup == 0)
		return B_BAD_VALUE;
	fNumGroups = numBlocks / blocksPerGroup;
	if (numBlocks % blocksPerGroup != 0)
		fNumGroups++;

	if (Has64bitFeature()) {
		fGroupDescriptorSize = fSuperBlock.GroupDescriptorSize();
		if (fGroupDescriptorSize < sizeof(ext2_block_group))
			return B_ERROR;
	} else
		fGroupDescriptorSize = EXT2_BLOCK_GROUP_NORMAL_SIZE;
	fGroupsPerBlock = fBlockSize / fGroupDescriptorSize;
	fNumInodes = fSuperBlock.NumInodes();

	TRACE("block size %ld, num groups %ld, groups per block %ld, first %lu\n",
		fBlockSize, fNumGroups, fGroupsPerBlock, fFirstDataBlock);

	uint32 blockCount = (fNumGroups + fGroupsPerBlock - 1) / fGroupsPerBlock;

	fGroupBlocks = (uint8**)malloc(blockCount * sizeof(uint8*));
	if (fGroupBlocks == NULL)
		return B_NO_MEMORY;

	memset(fGroupBlocks, 0, blockCount * sizeof(uint8*));
	fInodesPerBlock = fBlockSize / InodeSize();

	// check if the device size is large enough to hold the file system
	off_t diskSize;
	status = opener.GetSize(&diskSize);
	if (status != B_OK)
		return status;
	if (diskSize < ((off_t)NumBlocks() << BlockShift()))
		return B_BAD_VALUE;

	fBlockCache = opener.InitCache(NumBlocks(), fBlockSize);
	if (fBlockCache == NULL)
		return B_ERROR;

	TRACE("Volume::Mount(): Initialized block cache: %p\n", fBlockCache);

	// initialize journal if mounted read-write
	if (!IsReadOnly()
		&& (fSuperBlock.CompatibleFeatures() & EXT2_FEATURE_HAS_JOURNAL) != 0) {
		// TODO: There should be a mount option to ignore the existent journal
		if (fSuperBlock.JournalInode() != 0) {
			fJournalInode = new(std::nothrow) Inode(this,
				fSuperBlock.JournalInode());
			if (fJournalInode == NULL)
				return B_NO_MEMORY;

			TRACE("Opening an on disk, inode mapped journal.\n");
			fJournal = new(std::nothrow) InodeJournal(fJournalInode);
		} else {
			// TODO: external journal
			TRACE("Can not open an external journal.\n");
			return B_UNSUPPORTED;
		}
	} else {
		TRACE("Opening a fake journal (NoJournal).\n");
		fJournal = new(std::nothrow) NoJournal(this);
	}

	if (fJournal == NULL) {
		TRACE("No memory to create the journal\n");
		return B_NO_MEMORY;
	}

	TRACE("Volume::Mount(): Checking if journal was initialized\n");
	status = fJournal->InitCheck();
	if (status != B_OK) {
		FATAL("could not initialize journal!\n");
		return status;
	}

	// TODO: Only recover if asked to
	TRACE("Volume::Mount(): Asking journal to recover\n");
	status = fJournal->Recover();
	if (status != B_OK) {
		FATAL("could not recover journal!\n");
		return status;
	}

	TRACE("Volume::Mount(): Restart journal log\n");
	status = fJournal->StartLog();
	if (status != B_OK) {
		FATAL("could not initialize start journal!\n");
		return status;
	}

	if (!IsReadOnly()) {
		// Initialize allocators
		fBlockAllocator = new(std::nothrow) BlockAllocator(this);
		if (fBlockAllocator != NULL) {
			TRACE("Volume::Mount(): Initialize block allocator\n");
			status = fBlockAllocator->Initialize();
		}
		if (fBlockAllocator == NULL || status != B_OK) {
			delete fBlockAllocator;
			fBlockAllocator = NULL;

			// fall back to read-only with a fake journal rather than
			// failing the whole mount
			FATAL("could not initialize block allocator, going read-only!\n");
			fFlags |= VOLUME_READ_ONLY;
			fJournal->Uninit();
			delete fJournal;
			delete fJournalInode;
			fJournalInode = NULL;
			fJournal = new(std::nothrow) NoJournal(this);
		}
	}

	// ready
	status = get_vnode(fFSVolume, EXT2_ROOT_NODE, (void**)&fRootNode);
	if (status != B_OK) {
		FATAL("could not create root node: get_vnode() failed!\n");
		return status;
	}

	// all went fine
	opener.Keep();

	if (!fSuperBlock.name[0]) {
		// generate a more or less descriptive volume name
		off_t divisor = 1ULL << 40;
		char unit = 'T';
		if (diskSize < divisor) {
			divisor = 1UL << 30;
			unit = 'G';
			if (diskSize < divisor) {
				divisor = 1UL << 20;
				unit = 'M';
			}
		}

		double size = double((10 * diskSize + divisor - 1) / divisor);
			// %g in the kernel does not support precision...

		snprintf(fName, sizeof(fName), "%g %cB Ext2 Volume",
			size / 10, unit);
	}

	return B_OK;
}
/*! \brief Mounts the BFS volume found on \a deviceName.

	Reads and validates the superblock, replays the journal log,
	initializes the block allocator, and publishes the root (and,
	if present, indices) vnodes.

	\param deviceName Path of the device to mount.
	\param flags Mount flags (B_MOUNT_READ_ONLY honored).

	\return B_OK on success, an error code otherwise.
*/
status_t
Volume::Mount(const char* deviceName, uint32 flags)
{
	// TODO: validate the FS in write mode as well!
#if (B_HOST_IS_LENDIAN && defined(BFS_BIG_ENDIAN_ONLY)) \
	|| (B_HOST_IS_BENDIAN && defined(BFS_LITTLE_ENDIAN_ONLY))
	// in big endian mode, we only mount read-only for now
	flags |= B_MOUNT_READ_ONLY;
#endif

	DeviceOpener opener(deviceName, (flags & B_MOUNT_READ_ONLY) != 0
		? O_RDONLY : O_RDWR);
	fDevice = opener.Device();
	if (fDevice < B_OK)
		RETURN_ERROR(fDevice);

	if (opener.IsReadOnly())
		fFlags |= VOLUME_READ_ONLY;

	// read the superblock
	if (Identify(fDevice, &fSuperBlock) != B_OK) {
		FATAL(("invalid superblock!\n"));
		return B_BAD_VALUE;
	}

	// initialize short hands to the superblock (to save byte swapping)
	fBlockSize = fSuperBlock.BlockSize();
	fBlockShift = fSuperBlock.BlockShift();
	fAllocationGroupShift = fSuperBlock.AllocationGroupShift();

	// check if the device size is large enough to hold the file system
	off_t diskSize;
	if (opener.GetSize(&diskSize, &fDeviceBlockSize) != B_OK)
		RETURN_ERROR(B_ERROR);
	if (diskSize < (NumBlocks() << BlockShift()))
		RETURN_ERROR(B_BAD_VALUE);

	// set the current log pointers, so that journaling will work correctly
	fLogStart = fSuperBlock.LogStart();
	fLogEnd = fSuperBlock.LogEnd();

	if ((fBlockCache = opener.InitCache(NumBlocks(), fBlockSize)) == NULL)
		return B_ERROR;

	fJournal = new(std::nothrow) Journal(this);
	if (fJournal == NULL)
		return B_NO_MEMORY;

	status_t status = fJournal->InitCheck();
	if (status < B_OK) {
		FATAL(("could not initialize journal: %s!\n", strerror(status)));
		return status;
	}

	// replaying the log is the first thing we will do on this disk
	status = fJournal->ReplayLog();
	if (status != B_OK) {
		FATAL(("Replaying log failed, data may be corrupted, volume "
			"read-only.\n"));
		fFlags |= VOLUME_READ_ONLY;
			// TODO: if this is the boot volume, Bootscript will assume this
			// is a CD...
			// TODO: it would be nice to have a user visible alert instead
			// of letting him just find this in the syslog.
	}

	status = fBlockAllocator.Initialize();
	if (status != B_OK) {
		FATAL(("could not initialize block bitmap allocator!\n"));
		return status;
	}

	fRootNode = new(std::nothrow) Inode(this, ToVnode(Root()));
	if (fRootNode != NULL && fRootNode->InitCheck() == B_OK) {
		status = publish_vnode(fVolume, ToVnode(Root()), (void*)fRootNode,
			&gBFSVnodeOps, fRootNode->Mode(), 0);
		if (status == B_OK) {
			// try to get indices root dir
			if (!Indices().IsZero()) {
				fIndicesNode = new(std::nothrow) Inode(this,
					ToVnode(Indices()));
			}

			if (fIndicesNode == NULL || fIndicesNode->InitCheck() < B_OK
				|| !fIndicesNode->IsContainer()) {
				INFORM(("bfs: volume doesn't have indices!\n"));

				if (fIndicesNode) {
					// if this is the case, the index root node is gone bad,
					// and BFS switch to read-only mode
					fFlags |= VOLUME_READ_ONLY;

					delete fIndicesNode;
					fIndicesNode = NULL;
				}
			} else {
				// we don't use the vnode layer to access the indices node
			}
		} else {
			FATAL(("could not create root node: publish_vnode() failed!\n"));
			delete fRootNode;
			// clear the member so a later unmount/destructor cannot
			// double-delete the stale pointer
			fRootNode = NULL;
			return status;
		}
	} else {
		status = B_BAD_VALUE;
		FATAL(("could not create root node!\n"));
		// the Inode may have been allocated but failed InitCheck();
		// the previous code leaked it and left the member dangling
		delete fRootNode;
		fRootNode = NULL;
		return status;
	}

	// all went fine
	opener.Keep();
	return B_OK;
}
/*! \brief Allocates the next available extent of given length.

	\param _length The desired length (in bytes) of the extent.
	\param contiguous If false, signals that an extent of shorter length will
	       be accepted. This allows for small chunks of unallocated space to
	       be consumed, provided a contiguous chunk is not needed.
	\param extent Output parameter into which the extent as allocated
	       is stored. Note that the length field of the extent may be
	       shorter than the length parameter passed to this function if
	       \a contiguous is false.
	\param minimumStartingBlock The minimum acceptable starting block
	       for the extent (used by the physical partition allocator).

	\return
	- B_OK: Success.
	- error code: Failure.
*/
status_t
Allocator::GetNextExtent(uint32 _length, bool contiguous,
	Udf::extent_address &extent, uint32 minimumStartingBlock)
{
	// _length is a uint32, so it takes %lu; the previous %lld read
	// past the 32-bit argument on the varargs stack
	DEBUG_INIT_ETC("Allocator", ("length: %lu, contiguous: %d", _length,
		contiguous));
	uint32 length = BlocksFor(_length);
	bool isPartial = false;
	status_t error = InitCheck();
	PRINT(("allocation length: %lu\n", Length()));
	if (!error) {
		// Scan the free-chunk list for the first chunk that can satisfy
		// the request (or any usable chunk at all when !contiguous)
		for (list<Udf::extent_address>::iterator i = fChunkList.begin();
				i != fChunkList.end(); i++) {
			uint32 chunkOffset = i->location();
			uint32 chunkLength = BlocksFor(i->length());
			if (chunkOffset < minimumStartingBlock) {
				if (minimumStartingBlock < chunkOffset + chunkLength) {
					// Start of chunk is below min starting block. See if
					// any part of the chunk would make for an acceptable
					// allocation
					uint32 difference = minimumStartingBlock - chunkOffset;
					uint32 newOffset = minimumStartingBlock;
					uint32 newLength = chunkLength - difference;
					if (length <= newLength) {
						// new chunk is still long enough
						Udf::extent_address newExtent(newOffset, _length);
						if (GetExtent(newExtent) == B_OK) {
							extent = newExtent;
							return B_OK;
						}
					} else if (!contiguous) {
						// new chunk is too short, but we're allowed to
						// allocate a shorter extent, so we'll do it.
						Udf::extent_address newExtent(newOffset,
							newLength << BlockShift());
						if (GetExtent(newExtent) == B_OK) {
							extent = newExtent;
							return B_OK;
						}
					}
				}
			} else if (length <= chunkLength) {
				// Chunk is larger than necessary. Allocate first
				// length blocks, and resize the chunk appropriately.
				extent.set_location(chunkOffset);
				extent.set_length(_length);
				if (length != chunkLength) {
					i->set_location(chunkOffset + length);
					i->set_length((chunkLength - length) << BlockShift());
				} else {
					// exact fit; the chunk is consumed entirely
					fChunkList.erase(i);
				}
				return B_OK;
			} else if (!contiguous) {
				// chunk is too short, but partial allocations are allowed
				extent.set_location(chunkOffset);
				extent.set_length(chunkLength << BlockShift());
				fChunkList.erase(i);
				return B_OK;
			}
		}
		// No sufficient chunk found, so try to allocate from the tail
		PRINT(("ULONG_MAX: %lu\n", ULONG_MAX));
		uint32 maxLength = ULONG_MAX - Length();
		PRINT(("maxLength: %lu\n", maxLength));
		error = maxLength > 0 ? B_OK : B_DEVICE_FULL;
		if (!error) {
			if (minimumStartingBlock > Tail())
				maxLength -= minimumStartingBlock - Tail();
			uint32 tail = minimumStartingBlock > Tail()
				? minimumStartingBlock : Tail();
			if (length > maxLength) {
				if (contiguous)
					error = B_DEVICE_FULL;
				else {
					isPartial = true;
					length = maxLength;
				}
			}
			if (!error) {
				Udf::extent_address newExtent(tail, isPartial
					? length << BlockShift() : _length);
				if (GetExtent(newExtent) == B_OK) {
					extent = newExtent;
					return B_OK;
				}
			}
		}
	}
	return error;
}