/*
 * BlockAllocate
 *
 * Allocate space on a volume.  If contiguous allocation is requested,
 * at least minBlocks contiguous blocks are allocated or an error is
 * returned.  If contiguous allocation is not forced, the space allocated
 * (if any) may be smaller than minBlocks.
 *
 * On success, the run allocated is returned via *actualStartBlock and
 * *actualNumBlocks; both are zeroed on entry so they are well-defined
 * even on error.  Partial allocations still update the volume's free
 * block count and roving allocation pointer on the way out.
 *
 * NOTE(review): callers are presumed to hold whatever higher-level
 * serialization the allocation bitmap requires; this routine only takes
 * the mount lock around nextAllocation/freeBlocks updates — confirm
 * against the callers of BlockAllocate.
 */
__private_extern__
OSErr BlockAllocate (
	ExtendedVCB		*vcb,				/* which volume to allocate space on */
	u_int32_t		startingBlock,		/* preferred starting block, or 0 for no preference */
	u_int32_t		minBlocks,			/* desired number of blocks to allocate */
	u_int32_t		maxBlocks,			/* maximum number of blocks to allocate */
	Boolean			forceContiguous,	/* non-zero to force contiguous allocation and to force */
										/* minBlocks bytes to actually be allocated */
	Boolean			useMetaZone,
	u_int32_t		*actualStartBlock,	/* actual first block of allocation */
	u_int32_t		*actualNumBlocks)	/* number of blocks actually allocated; if forceContiguous */
										/* was zero, then this may represent fewer than minBlocks */
{
	u_int32_t	freeBlocks;
	OSErr		err;
	Boolean		updateAllocPtr = false;		//	true if nextAllocation needs to be updated

	//
	//	Initialize outputs in case we get an error
	//
	*actualStartBlock = 0;
	*actualNumBlocks = 0;
	freeBlocks = hfs_freeblks(VCBTOHFS(vcb), 0);

	//
	//	If the disk is already full, don't bother.
	//
	if (freeBlocks == 0) {
		err = dskFulErr;
		goto Exit;
	}
	// A contiguous request for more than the total free space can never
	// succeed, so fail early before scanning the bitmap.
	if (forceContiguous && freeBlocks < minBlocks) {
		err = dskFulErr;
		goto Exit;
	}

	/*
	 * Clip if necessary so we don't over-subscribe the free blocks.
	 */
	if (minBlocks > freeBlocks) {
		minBlocks = freeBlocks;
	}
	if (maxBlocks > freeBlocks) {
		maxBlocks = freeBlocks;
	}

	//
	//	If caller didn't specify a starting block number, then use the volume's
	//	next block to allocate from.  (The mount lock guards the read of the
	//	roving allocation pointer.)
	//
	if (startingBlock == 0) {
		HFS_MOUNT_LOCK(vcb, TRUE);
		startingBlock = vcb->nextAllocation;
		HFS_MOUNT_UNLOCK(vcb, TRUE);
		updateAllocPtr = true;
	}
	if (startingBlock >= vcb->allocLimit) {
		startingBlock = 0; /* overflow so start at beginning */
	}

	//
	//	If the request must be contiguous, then find a sequence of free blocks
	//	that is long enough.  Otherwise, find the first free block.
	//
	if (forceContiguous) {
		err = BlockAllocateContig(vcb, startingBlock, minBlocks, maxBlocks,
		                          useMetaZone, actualStartBlock, actualNumBlocks);
		/*
		 * If we allocated from a new position then
		 * also update the roving allocator.
		 * (Skipped when the new position falls inside the metadata
		 * zone, so ordinary file data keeps roving outside it.)
		 */
		if ((err == noErr) &&
		    (*actualStartBlock > startingBlock) &&
		    ((*actualStartBlock < VCBTOHFS(vcb)->hfs_metazone_start) ||
		     (*actualStartBlock > VCBTOHFS(vcb)->hfs_metazone_end))) {
			HFS_MOUNT_LOCK(vcb, TRUE);
			HFS_UPDATE_NEXT_ALLOCATION(vcb, *actualStartBlock);
			HFS_MOUNT_UNLOCK(vcb, TRUE);
		}
	} else {
		/*
		 * Scan the bitmap once, gather the N largest free extents, then
		 * allocate from these largest extents.  Repeat as needed until
		 * we get all the space we needed.  We could probably build up
		 * that list when the higher level caller tried (and failed) a
		 * contiguous allocation first.
		 *
		 * Fallback order on dskFulErr:
		 *   1. BlockAllocateKnown  — use the cached list of free extents;
		 *   2. BlockAllocateAny    — scan from startingBlock to allocLimit;
		 *   3. BlockAllocateAny    — wrap around: scan from block 1 to
		 *                            startingBlock.
		 */
		err = BlockAllocateKnown(vcb, maxBlocks, actualStartBlock, actualNumBlocks);
		if (err == dskFulErr)
			err = BlockAllocateAny(vcb, startingBlock, vcb->allocLimit,
			                       maxBlocks, useMetaZone, actualStartBlock,
			                       actualNumBlocks);
		if (err == dskFulErr)
			err = BlockAllocateAny(vcb, 1, startingBlock, maxBlocks,
			                       useMetaZone, actualStartBlock,
			                       actualNumBlocks);
	}

Exit:
	// if we actually allocated something then go update the
	// various bits of state that we maintain regardless of
	// whether there was an error (i.e. partial allocations
	// still need to update things like the free block count).
	//
	if (*actualNumBlocks != 0) {
		//
		//	If we used the volume's roving allocation pointer, then we need to update it.
		//	Adding in the length of the current allocation might reduce the next allocate
		//	call by avoiding a re-scan of the already allocated space.  However, the clump
		//	just allocated can quite conceivably end up being truncated or released when
		//	the file is closed or its EOF changed.  Leaving the allocation pointer at the
		//	start of the last allocation will avoid unnecessary fragmentation in this case.
		//
		HFS_MOUNT_LOCK(vcb, TRUE);

		if (updateAllocPtr &&
		    ((*actualStartBlock < VCBTOHFS(vcb)->hfs_metazone_start) ||
		     (*actualStartBlock > VCBTOHFS(vcb)->hfs_metazone_end))) {
			HFS_UPDATE_NEXT_ALLOCATION(vcb, *actualStartBlock);
		}
		//
		//	Update the number of free blocks on the volume
		//
		vcb->freeBlocks -= *actualNumBlocks;
		MarkVCBDirty(vcb);
		HFS_MOUNT_UNLOCK(vcb, TRUE);

		// Notify interested parties (e.g. low-disk-space watchers) that
		// the volume's free space changed.
		hfs_generate_volume_notifications(VCBTOHFS(vcb));
	}

	return err;
}
/*
 * hfs_makelink - create a hard link to cnode 'cp' in directory 'dcp'
 * under the name carried in 'cnp'.
 *
 * If this is the first hard link to the file (c_nlink == 2 and the
 * cnode is not yet marked C_HARDLINK), the original catalog record is
 * first moved into the HFS Private Metadata folder under a unique
 * "inode" name, an indirect link node is created in its place, and the
 * private-dir entry count is bumped.  Subsequent links only add a new
 * indirect link entry referencing the existing inode number (c_rdev).
 *
 * On any failure after the rename into the private folder, the rename
 * is backed out (and the kernel panics if the backout itself fails,
 * since the catalog would otherwise be left inconsistent).
 *
 * Locking:
 *   2 locks are needed (dvp and vp)
 *   also need catalog lock (taken/released here, LK_EXCLUSIVE)
 *
 * caller's responsibility:
 *	 componentname cleanup
 *	 unlocking dvp and vp
 *
 * Returns 0 on success or an errno value (EPERM, ENOSPC, or whatever
 * the catalog operations return).
 */
static int
hfs_makelink(struct hfsmount *hfsmp, struct cnode *cp, struct cnode *dcp,
		struct componentname *cnp)
{
	struct proc *p = cnp->cn_proc;
	u_int32_t indnodeno = 0;
	char inodename[32];
	struct cat_desc to_desc;
	int newlink = 0;
	int retval;
	cat_cookie_t cookie = {0};

	/* We don't allow link nodes in our Private Meta Data folder! */
	if (dcp->c_fileid == hfsmp->hfs_privdir_desc.cd_cnid)
		return (EPERM);

	if (hfs_freeblks(hfsmp, 0) == 0)
		return (ENOSPC);

	/*
	 * Reserve some space in the Catalog file.
	 * Worst case is two creates (inode link + new link) plus the
	 * rename into the private folder.
	 */
	if ((retval = cat_preflight(hfsmp, (2 * CAT_CREATE) + CAT_RENAME, &cookie, p))) {
		return (retval);
	}

	/* Lock catalog b-tree */
	retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
	if (retval) {
		goto out2;
	}

	/*
	 * If this is a new hardlink then we need to create the data
	 * node (inode) and replace the original file with a link node.
	 */
	if (cp->c_nlink == 2 && (cp->c_flag & C_HARDLINK) == 0) {
		newlink = 1;
		bzero(&to_desc, sizeof(to_desc));
		to_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid;
		to_desc.cd_cnid = cp->c_fileid;

		do {
			/* get a unique indirect node number (+100 keeps it clear
			   of reserved low CNID values) */
			indnodeno = ((random() & 0x3fffffff) + 100);
			MAKE_INODE_NAME(inodename, indnodeno);

			/* move source file to data node directory */
			to_desc.cd_nameptr = inodename;
			to_desc.cd_namelen = strlen(inodename);

			/* retry with a fresh number if the name already exists */
			retval = cat_rename(hfsmp, &cp->c_desc, &hfsmp->hfs_privdir_desc,
					&to_desc, NULL);
		} while (retval == EEXIST);
		if (retval)
			goto out;

		/* Replace source file with link node */
		retval = createindirectlink(hfsmp, indnodeno, cp->c_parentcnid,
				cp->c_desc.cd_nameptr, &cp->c_desc.cd_cnid);
		if (retval) {
			/* put the source file back */
		// XXXdbg
		#if 1
			{
			int err;
			/* backout must succeed or the catalog is inconsistent */
			err = cat_rename(hfsmp, &to_desc, &dcp->c_desc, &cp->c_desc, NULL);
			if (err)
				panic("hfs_makelink: error %d from cat_rename backout 1", err);
			}
		#else
			(void) cat_rename(hfsmp, &to_desc, &dcp->c_desc,
					&cp->c_desc, NULL);
		#endif
			goto out;
		}
		/* remember the inode number for future links to this file */
		cp->c_rdev = indnodeno;
	} else {
		/* existing hardlink: reuse the previously assigned inode number */
		indnodeno = cp->c_rdev;
	}

	/*
	 * Create a catalog entry for the new link (parentID + name).
	 */
	retval = createindirectlink(hfsmp, indnodeno, dcp->c_fileid,
			cnp->cn_nameptr, NULL);
	if (retval && newlink) {
		/* Get rid of new link */
		(void) cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

		/* Put the source file back */
	// XXXdbg
	#if 1
		{
		int err;
		/* backout must succeed or the catalog is inconsistent */
		err = cat_rename(hfsmp, &to_desc, &dcp->c_desc, &cp->c_desc, NULL);
		if (err)
			panic("hfs_makelink: error %d from cat_rename backout 2", err);
		}
	#else
		(void) cat_rename(hfsmp, &to_desc, &dcp->c_desc, &cp->c_desc, NULL);
	#endif
		goto out;
	}

	/*
	 * Finally, if this is a new hardlink then:
	 *  - update HFS Private Data dir
	 *  - mark the cnode as a hard link
	 */
	if (newlink) {
		hfsmp->hfs_privdir_attr.ca_entries++;
		(void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
				&hfsmp->hfs_privdir_attr, NULL, NULL);
		hfs_volupdate(hfsmp, VOL_MKFILE, 0);
		cp->c_flag |= (C_CHANGE | C_HARDLINK);
	}

out:
	/* Unlock catalog b-tree */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
out2:
	/* release the catalog space reservation taken by cat_preflight */
	cat_postflight(hfsmp, &cookie, p);
	return (retval);
}