Example #1
OSErr BlockDeallocate (
	SVCB		*vcb,			//	Which volume to deallocate space on
	UInt32			firstBlock,		//	First block in range to deallocate
	UInt32			numBlocks)		//	Number of contiguous blocks to deallocate
{
	OSErr			err;

	//
	//	If no blocks to deallocate, then exit early
	//
	if (numBlocks == 0) {
		err = noErr;
		goto Exit;
	}

	//
	//	Call internal routine to free the sequence of blocks
	//
	err = BlockMarkFree(vcb, firstBlock, numBlocks);
	if (err)
		goto Exit;

	//
	//	Update the volume's free block count, and mark the VCB as dirty.
	//
	vcb->vcbFreeBlocks += numBlocks;
	MarkVCBDirty(vcb);

Exit:

	return err;
}
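For illustration, here is a minimal caller sketch against the routine above, assuming the same SVCB and OSErr declarations; ReleaseExtent and its extent parameters are hypothetical and exist only to show the calling convention (pass the first block and the length of the contiguous run, and propagate the OSErr).

// Hypothetical caller sketch: return one extent to the volume's free pool.
// extentStart/extentCount are assumed to describe a run the caller has
// already detached from a file's extent record.
static OSErr ReleaseExtent(SVCB *vcb, UInt32 extentStart, UInt32 extentCount)
{
	OSErr	err;

	err = BlockDeallocate(vcb, extentStart, extentCount);
	if (err != noErr)
		return err;		// bitmap not updated; free block count unchanged

	// On success, vcbFreeBlocks has grown by extentCount and the VCB is dirty.
	return noErr;
}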
OSErr
FlushCatalog(ExtendedVCB *volume)
{
	FCB *	fcb;
	OSErr	result;
	
	fcb = GetFileControlBlock(volume->catalogRefNum);
	result = BTFlushPath(fcb);

	if (result == noErr)
	{
		//--- check if catalog's fcb is dirty...
		//
		//	Note: this check is currently compiled out (the condition is hard-coded
		//	to 0), so the VCB is never marked dirty or timestamped here.
		if ( 0 /*fcb->fcbFlags & fcbModifiedMask*/ )
		{
			HFS_MOUNT_LOCK(volume, TRUE);
			MarkVCBDirty(volume);	// Mark the VCB dirty
			volume->vcbLsMod = GetTimeUTC();	// update last modified date
			HFS_MOUNT_UNLOCK(volume, TRUE);

		//	result = FlushVolumeControlBlock(volume);
		}
	}
	
	return result;
}
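A hedged sketch of how a sync path might drive FlushCatalog, assuming the ExtendedVCB type used above; FlushVolumeSpecialFiles is a hypothetical wrapper, not a routine from this source.

// Hypothetical wrapper: flush the catalog B-tree as one step of a volume sync.
static OSErr FlushVolumeSpecialFiles(ExtendedVCB *volume)
{
	OSErr	result;

	result = FlushCatalog(volume);	// push the catalog file's dirty B-tree path to disk
	if (result != noErr)
		return result;

	// A full sync would also flush the extents overflow and allocation files
	// and finally the volume header; those steps are omitted here.
	return noErr;
}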
__private_extern__
OSErr BlockDeallocate (
	ExtendedVCB		*vcb,			//	Which volume to deallocate space on
	u_int32_t		firstBlock,		//	First block in range to deallocate
	u_int32_t		numBlocks)		//	Number of contiguous blocks to deallocate
{
	OSErr			err;
	
	//
	//	If no blocks to deallocate, then exit early
	//
	if (numBlocks == 0) {
		err = noErr;
		goto Exit;
	}

	//
	//	Call internal routine to free the sequence of blocks
	//
	err = BlockMarkFree(vcb, firstBlock, numBlocks);
	if (err)
		goto Exit;

	//
	//	Update the volume's free block count, and mark the VCB as dirty.
	//
	HFS_MOUNT_LOCK(vcb, TRUE);
	vcb->freeBlocks += numBlocks;
	if (vcb->nextAllocation == (firstBlock + numBlocks))
		HFS_UPDATE_NEXT_ALLOCATION(vcb, (vcb->nextAllocation - numBlocks));
	MarkVCBDirty(vcb);
	HFS_MOUNT_UNLOCK(vcb, TRUE);

	hfs_generate_volume_notifications(VCBTOHFS(vcb));
Exit:

	return err;
}
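The interaction between the freed run and the roving allocation pointer above is easiest to see with concrete numbers; the sketch below is purely illustrative (the values are hypothetical) and assumes the ExtendedVCB version just shown.

// Illustrative sketch: freeing the run that ends exactly at nextAllocation
// pulls the roving pointer back onto the freed space.
static void ShowNextAllocationRollback(ExtendedVCB *vcb)
{
	// Assume vcb->nextAllocation == 1000 on entry.
	(void) BlockDeallocate(vcb, 990, 10);	// frees blocks 990..999
	// Now freeBlocks has grown by 10 and nextAllocation == 990, because
	// 990 + 10 matched the old nextAllocation; freeing any other run would
	// leave nextAllocation untouched.
}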
__private_extern__
OSErr BlockAllocate (
	ExtendedVCB		*vcb,				/* which volume to allocate space on */
	u_int32_t		startingBlock,		/* preferred starting block, or 0 for no preference */
	u_int32_t		minBlocks,		/* desired number of blocks to allocate */
	u_int32_t		maxBlocks,		/* maximum number of blocks to allocate */
	Boolean			forceContiguous,	/* non-zero to force contiguous allocation and to force */
							/* minBlocks blocks to actually be allocated */
	Boolean			useMetaZone,		/* non-zero if blocks from the metadata zone may be used */
	u_int32_t		*actualStartBlock,	/* actual first block of allocation */
	u_int32_t		*actualNumBlocks)	/* number of blocks actually allocated; if forceContiguous */
							/* was zero, then this may represent fewer than minBlocks */
{
	u_int32_t  freeBlocks;
	OSErr			err;
	Boolean			updateAllocPtr = false;		//	true if nextAllocation needs to be updated

	//
	//	Initialize outputs in case we get an error
	//
	*actualStartBlock = 0;
	*actualNumBlocks = 0;
	freeBlocks = hfs_freeblks(VCBTOHFS(vcb), 0);
	
	//
	//	If the disk is already full, don't bother.
	//
	if (freeBlocks == 0) {
		err = dskFulErr;
		goto Exit;
	}
	if (forceContiguous && freeBlocks < minBlocks) {
		err = dskFulErr;
		goto Exit;
	}
	/*
	 * Clip if necessary so we don't over-subscribe the free blocks.
	 */
	if (minBlocks > freeBlocks) {
		minBlocks = freeBlocks;
	}
	if (maxBlocks > freeBlocks) {
		maxBlocks = freeBlocks;
	}

	//
	//	If caller didn't specify a starting block number, then use the volume's
	//	next block to allocate from.
	//
	if (startingBlock == 0) {
		HFS_MOUNT_LOCK(vcb, TRUE);
		startingBlock = vcb->nextAllocation;
		HFS_MOUNT_UNLOCK(vcb, TRUE);
		updateAllocPtr = true;
	}
	if (startingBlock >= vcb->allocLimit) {
		startingBlock = 0; /* overflow so start at beginning */
	}

	//
	//	If the request must be contiguous, then find a sequence of free blocks
	//	that is long enough.  Otherwise, find the first free block.
	//
	if (forceContiguous) {
		err = BlockAllocateContig(vcb, startingBlock, minBlocks, maxBlocks,
		                          useMetaZone, actualStartBlock, actualNumBlocks);
		/*
		 * If we allocated from a new position then
		 * also update the roving allocator.
		 */
		if ((err == noErr) &&
		    (*actualStartBlock > startingBlock) &&
		    ((*actualStartBlock < VCBTOHFS(vcb)->hfs_metazone_start) ||
	    	     (*actualStartBlock > VCBTOHFS(vcb)->hfs_metazone_end))) {
			HFS_MOUNT_LOCK(vcb, TRUE);
			HFS_UPDATE_NEXT_ALLOCATION(vcb, *actualStartBlock);
			HFS_MOUNT_UNLOCK(vcb, TRUE);
		}
	} else {
		/*
		 * Scan the bitmap once, gather the N largest free extents, then
		 * allocate from these largest extents.  Repeat as needed until
		 * we get all the space we needed.  We could probably build up
		 * that list when the higher level caller tried (and failed) a
		 * contiguous allocation first.
		 */
		err = BlockAllocateKnown(vcb, maxBlocks, actualStartBlock, actualNumBlocks);
		if (err == dskFulErr)
			err = BlockAllocateAny(vcb, startingBlock, vcb->allocLimit,
			                       maxBlocks, useMetaZone, actualStartBlock,
			                       actualNumBlocks);
		if (err == dskFulErr)
			err = BlockAllocateAny(vcb, 1, startingBlock, maxBlocks,
			                       useMetaZone, actualStartBlock,
			                       actualNumBlocks);
	}

Exit:
	// if we actually allocated something then go update the
	// various bits of state that we maintain regardless of
	// whether there was an error (i.e. partial allocations
	// still need to update things like the free block count).
	//
	if (*actualNumBlocks != 0) {
		//
		//	If we used the volume's roving allocation pointer, then we need to update it.
		//	Adding in the length of the current allocation might reduce the next allocate
		//	call by avoiding a re-scan of the already allocated space.  However, the clump
		//	just allocated can quite conceivably end up being truncated or released when
		//	the file is closed or its EOF changed.  Leaving the allocation pointer at the
		//	start of the last allocation will avoid unnecessary fragmentation in this case.
		//
		HFS_MOUNT_LOCK(vcb, TRUE);

		if (updateAllocPtr &&
		    ((*actualStartBlock < VCBTOHFS(vcb)->hfs_metazone_start) ||
	    	     (*actualStartBlock > VCBTOHFS(vcb)->hfs_metazone_end))) {
			HFS_UPDATE_NEXT_ALLOCATION(vcb, *actualStartBlock);
		}
		//
		//	Update the number of free blocks on the volume
		//
		vcb->freeBlocks -= *actualNumBlocks;
		MarkVCBDirty(vcb);
		HFS_MOUNT_UNLOCK(vcb, TRUE);

		hfs_generate_volume_notifications(VCBTOHFS(vcb));
	}
	
	return err;
}
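A hedged caller sketch follows, assuming only the BlockAllocate and BlockDeallocate signatures above; ExtendFileSketch and its policy (try a contiguous extent first, then settle for fragments) are hypothetical.

// Hypothetical caller: grow a file by `want` allocation blocks, preferring a
// single contiguous extent but accepting a possibly partial, fragmented
// allocation.  Only BlockAllocate/BlockDeallocate come from the code above.
static OSErr ExtendFileSketch(ExtendedVCB *vcb, u_int32_t want)
{
	u_int32_t	start = 0;
	u_int32_t	count = 0;
	OSErr		err;

	// First try: exactly `want` contiguous blocks, no starting preference,
	// staying out of the metadata zone.
	err = BlockAllocate(vcb, 0, want, want, true, false, &start, &count);
	if (err == dskFulErr) {
		// Fall back: take whatever is available, up to `want` blocks.
		err = BlockAllocate(vcb, 0, 1, want, false, false, &start, &count);
	}
	if (err != noErr)
		return err;

	// The caller would now record the extent [start, start + count) in the
	// file's extent record; if that bookkeeping fails, the blocks should be
	// returned with BlockDeallocate(vcb, start, count).
	return noErr;
}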
Example #5
OSErr BlockAllocate (
	SVCB		*vcb,				/* which volume to allocate space on */
	UInt32			startingBlock,		/* preferred starting block, or 0 for no preference */
	UInt32			blocksRequested,	/* desired number of blocks to allocate */
	UInt32			blocksMaximum,		/* maximum number of blocks to allocate */
	Boolean			forceContiguous,	/* non-zero to force contiguous allocation and to force */
										/* blocksRequested blocks to actually be allocated */
	UInt32			*actualStartBlock,	/* actual first block of allocation */
	UInt32			*actualNumBlocks)	/* number of blocks actually allocated; if forceContiguous */
										/* was zero, then this may represent fewer than blocksRequested */
										/* blocks */
{
	OSErr			err;
	Boolean			updateAllocPtr = false;		//	true if nextAllocation needs to be updated

	//
	//	Initialize outputs in case we get an error
	//
	*actualStartBlock = 0;
	*actualNumBlocks = 0;
	
	//
	//	If the disk is already full, don't bother.
	//
	if (vcb->vcbFreeBlocks == 0) {
		err = dskFulErr;
		goto Exit;
	}
	if (forceContiguous && vcb->vcbFreeBlocks < blocksRequested) {
		err = dskFulErr;
		goto Exit;
	}
	
	//
	//	If caller didn't specify a starting block number, then use the volume's
	//	next block to allocate from.
	//
	if (startingBlock == 0) {
		startingBlock = vcb->vcbNextAllocation;
		updateAllocPtr = true;
	}
	
	//
	//	If the request must be contiguous, then find a sequence of free blocks
	//	that is long enough.  Otherwise, find the first free block.
	//
	if (forceContiguous)
		err = BlockAllocateContig(vcb, startingBlock, blocksRequested, blocksMaximum, actualStartBlock, actualNumBlocks);
	else {
		//	We'll try to allocate contiguous space first.  If that fails, we'll fall back to finding whatever tiny
		//	extents we can find.  It would be nice if we kept track of the largest N free extents so that falling
		//	back grabbed a small number of large extents.
		err = BlockAllocateContig(vcb, startingBlock, blocksRequested, blocksMaximum, actualStartBlock, actualNumBlocks);
		if (err == dskFulErr)
			err = BlockAllocateAny(vcb, startingBlock, vcb->vcbTotalBlocks, blocksMaximum, actualStartBlock, actualNumBlocks);
		if (err == dskFulErr)
			err = BlockAllocateAny(vcb, 0, startingBlock, blocksMaximum, actualStartBlock, actualNumBlocks);
	}

	if (err == noErr) {
		//
		//	If we used the volume's roving allocation pointer, then we need to update it.
		//	Adding in the length of the current allocation might reduce the next allocate
		//	call by avoiding a re-scan of the already allocated space.  However, the clump
		//	just allocated can quite conceivably end up being truncated or released when
		//	the file is closed or its EOF changed.  Leaving the allocation pointer at the
		//	start of the last allocation will avoid unnecessary fragmentation in this case.
		//
		if (updateAllocPtr)
			vcb->vcbNextAllocation = *actualStartBlock;
		
		//
		//	Update the number of free blocks on the volume
		//
		vcb->vcbFreeBlocks -= *actualNumBlocks;
		MarkVCBDirty(vcb);
	}
	
Exit:

	return err;
}
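Finally, a hedged sketch for this older SVCB interface, assuming the signature above; AllocateExactlySketch is hypothetical and only illustrates that, with forceContiguous false, the routine may hand back fewer blocks than requested, so a caller needing an exact total has to loop.

// Hypothetical caller sketch: keep allocating until blocksNeeded blocks have
// been gathered, accepting partial (non-contiguous) allocations along the way.
static OSErr AllocateExactlySketch(SVCB *vcb, UInt32 blocksNeeded)
{
	UInt32	start, count;
	UInt32	remaining = blocksNeeded;
	OSErr	err = noErr;

	while (remaining > 0) {
		err = BlockAllocate(vcb, 0, remaining, remaining, false, &start, &count);
		if (err != noErr || count == 0)
			break;			// typically dskFulErr: not enough space left
		// ... record the extent [start, start + count) somewhere ...
		remaining -= count;
	}

	// On failure a real caller would walk the extents it recorded and hand
	// each one back with BlockDeallocate(vcb, start, count).
	return err;
}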