/*
 * NAME:	jfs_lookup(dvp, vpp, pname, flag, vattrp, crp)
 *
 * FUNCTION:	resolve <pname> in directory <dvp> to vnode <vpp>
 *		with a reference acquired and attribute <vattrp>.
 *
 * PARAMETERS:	dvp	- directory vnode
 *		vpp	- object vnode (out)
 *		pname	- object name (NULL-terminated UniChar string)
 *		flag	- lookup flags (currently unused here)
 *		vattrp	- object attribute (out); may be NULL if the
 *			  caller does not want attributes
 *		crp	- credential
 *
 * RETURN:	0 on success; ENOTDIR if <dvp> is not a directory;
 *		ENOENT if the directory has been unlinked;
 *		errors from subroutines (dtSearch, iget).
 *
 * NOTE:	on success, *vpp holds a reference acquired via
 *		jfs_hold() (for ".") or iget() (all other names);
 *		the caller is responsible for releasing it.
 */
jfs_lookup(
	struct vnode	*dvp,
	struct vnode	**vpp,
	UniChar		*pname,		/* NULL terminated */
	int32		flag,
	struct vattr	*vattrp,
	struct ucred	*crp)
{
	int32		rc = 0;
	struct vfs	*vfsp = dvp->v_vfsp;
	inode_t		*dip = VP2IP(dvp);	/* parent directory inode */
	ino_t		ino;			/* object i_number */
	inode_t		*ip;			/* object inode */
	component_t	dname;			/* object name */
	ncookie_t	ncookie;
	btstack_t	btstack;

	NOISE(1,("jfs_lookup: dip:0x%08x name:%s\n", dip, pname));

	*vpp = NULL;

	/* <dvp> must be a directory */
	if ((dip->i_mode & IFMT) != IFDIR)
		return ENOTDIR;

	/* hold the directory read-locked for the whole lookup so the
	 * name-to-inumber mapping cannot change underneath us;
	 * every exit path below goes through out: to drop this lock.
	 */
	IREAD_LOCK(dip);
	if (dip->i_nlink == 0)
	{
		/* directory was removed while we held a reference */
		rc = ENOENT;
		goto out;
	}

	/*
	 * resolve name to i_number via dnlc/directory lookup
	 */
getInumber:	/* NOTE(review): no goto targets this label; kept as a
		 * section marker only */
	/*
	 * for "." or "..", lookup directory inode
	 */
	if (pname[0] == '.')
	{
		/* looking up "..": parent i_number cached in the inode */
		if (pname[1] == '.' && pname[2] == '\0')
		{
			ino = dip->i_parent;
			goto getInode;
		}
		/* looking up ".": return the directory itself with an
		 * extra hold; no iget() needed
		 */
		else if (pname[1] == '\0')
		{
			ip = dip;
			jfs_hold(dvp);
			*vpp = dvp;
			goto getAttribute;
		}
	}

	/*
	 * search dnlc/directory
	 */
	dname.name = pname;
	dname.namlen = UniStrlen(pname);
	if ((ino = ncLookup(dip->i_ipimap, dip->i_number, &dname, &ncookie)) == 0)
	{
		/*
		 * dnlc miss: search directory b+tree
		 */
		if (rc = dtSearch(dip, &dname, &ino, &btstack, JFS_LOOKUP))
			goto out;

		/* insert name entry to dnlc so the next lookup hits */
		ncEnter(dip->i_ipimap, dip->i_number, &dname, ino, &ncookie);
	}

	/*
	 * resolve i_number to inode/vnode with a reference
	 */
getInode:
	ICACHE_LOCK();
	rc = iget(vfsp, ino, &ip, 0);
	ICACHE_UNLOCK();
	if (rc)
		goto out;

	*vpp = IP2VP(ip);

	/*
	 * get attribute (optional: vattrp may be NULL)
	 */
getAttribute:
	if (vattrp != NULL)
		get_vattr(ip, vattrp);

out:
	IREAD_UNLOCK(dip);
	NOISE(1,("jfs_lookup: rc:%d\n", rc));
	return rc;
}
/*
 * jfs_get_block() - map a single file logical block to a disk block for
 * the buffer-I/O layer, optionally allocating a new extent.
 *
 * @ip:		inode being mapped
 * @lblock:	file logical block number
 * @bh_result:	buffer head to fill in (b_size on entry bounds the mapping)
 * @create:	non-zero to allocate backing storage when none is mapped
 *
 * Returns 0 on success or a negative errno from the extent routines.
 *
 * Locking: takes IWRITE_LOCK for create, IREAD_LOCK for read-only
 * mapping, and releases it on all paths via the unlock: label.
 */
int jfs_get_block(struct inode *ip, sector_t lblock,
		  struct buffer_head *bh_result, int create)
{
	s64 lblock64 = lblock;
	int rc = 0;
	xad_t xad;
	s64 xaddr;
	int xflag;
	/* requested mapping length in file-system blocks */
	s32 xlen = bh_result->b_size >> ip->i_blkbits;

	/*
	 * Take appropriate lock on inode
	 */
	if (create)
		IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	else
		IREAD_LOCK(ip, RDWRLOCK_NORMAL);

	/* only consult the xtree if the offset is inside i_size;
	 * xtLookup returns 0 on success and trims xlen to the extent
	 */
	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
	    (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
	    xaddr) {
		if (xflag & XAD_NOTRECORDED) {
			if (!create)
				/*
				 * Allocated but not recorded, read treats
				 * this as a hole
				 */
				goto unlock;
#ifdef _JFS_4K
			XADoffset(&xad, lblock64);
			XADlength(&xad, xlen);
			XADaddress(&xad, xaddr);
#else				/* _JFS_4K */
			/*
			 * As long as block size = 4K, this isn't a problem.
			 * We should mark the whole page not ABNR, but how
			 * will we know to mark the other blocks BH_New?
			 */
			BUG();
#endif				/* _JFS_4K */
			/* convert the allocated-but-not-recorded extent
			 * into a recorded one before handing it out
			 */
			rc = extRecord(ip, &xad);
			if (rc)
				goto unlock;
			set_buffer_new(bh_result);
		}

		map_bh(bh_result, ip->i_sb, xaddr);
		bh_result->b_size = xlen << ip->i_blkbits;
		goto unlock;
	}
	if (!create)
		goto unlock;	/* hole on a read: leave bh unmapped */

	/*
	 * Allocate a new block
	 */
#ifdef _JFS_4K
	/* seed the allocator with a placement hint near the offset */
	if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
		goto unlock;
	rc = extAlloc(ip, xlen, lblock64, &xad, false);
	if (rc)
		goto unlock;

	set_buffer_new(bh_result);
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;
#else				/* _JFS_4K */
	/*
	 * We need to do whatever it takes to keep all but the last buffers
	 * in 4K pages - see jfs_write.c
	 */
	BUG();
#endif				/* _JFS_4K */

unlock:
	/*
	 * Release lock on inode
	 */
	if (create)
		IWRITE_UNLOCK(ip);
	else
		IREAD_UNLOCK(ip);
	return rc;
}
/* * NAME: jfs_statfs(vfsp, sfsp, crp) * * FUNCTION: get file system status <sfsp> from vfs <vfsp> * * PARAMETER: vfsp - virtual file system * sfsp - file status information structure * crp - credential * * RETURN: zero on success, non-zero on failure * * serialization: statfs() and extendfs() serializes by inode lock * of the inode map for both inode map and block allocation map. * All other access to fragment allocation map is serialized * under VMM locking. * * note: percolation of file system information: * support struct statfs (sys/statfs.h) for get file system status * service call statfs(). * (XPG4.2 defines struct statvfs in sys/statvfs.h for statvfs() * which requires statfs() and additional information) */ jfs_statfs( register struct vfs *vfsp, register struct statfs *statfsp, struct ucred *crp) { register int32 rc; register inode_t *ipmnt; /* mount inode */ inode_t *ipimap, *ipbmap; cbuf_t *bpsuper; struct superblock *sb = NULL; int32 fsck_length, log_length; NOISE(1,("jfs_statfs: vfs:0x%08x\n", vfsp)); /* * get the file system stats from the superblock */ ipimap = (struct inode *)vfsp->vfs_data; ipmnt = ipimap->i_ipmnt; if (rc = readSuper(ipmnt, &bpsuper)) goto out; sb = (struct superblock *)(bpsuper->cm_cdata); /* bcopy(sb->s_fname, statfsp->f_fname, sizeof(sb->s_fname)); */ bcopy(sb->s_fpack, statfsp->f_fpack, sizeof(sb->s_fpack)); statfsp->f_bsize = PSIZE; /* preferred i/o block size */ statfsp->f_fsize = sb->s_bsize; /* fundamental block size */ fsck_length = lengthPXD(&(sb->s_fsckpxd)); log_length = lengthPXD(&(sb->s_logpxd)); rawRelease(bpsuper); /* statfs()/extendfs() serialized by inode lock of the inode map * for both inode map and block allocation map. 
*/ IREAD_LOCK(ipimap); /* * get the block stats from the bmap */ ipbmap = ipmnt->i_ipbmap; statfsp->f_blocks = (ipbmap->i_bmap->db_mapsize + fsck_length + log_length) >> ipbmap->i_bmap->db_l2nbperpage; statfsp->f_bfree = statfsp->f_bavail = ipbmap->i_bmap->db_nfree >> ipbmap->i_bmap->db_l2nbperpage; /* * get the file stats from the ipimap */ statfsp->f_files = ipimap->i_imap->im_numinos; statfsp->f_ffree = ipimap->i_imap->im_numfree; /* * fill in from vfs */ statfsp->f_fsid = vfsp->vfs_fsid; statfsp->f_vfstype = MNT_XJFS; statfsp->f_vfsnumber = vfsp->vfs_number; statfsp->f_name_max = JFS_NAME_MAX; /* * fields in the statfs structure that we don't fill in ... * long f_version; version/type of statfs, 0 for now long f_type; type of info, 0 for now long f_vfsoff; reserved, for vfs specific data offset long f_vfslen; reserved, for len of vfs specific data long f_vfsvers; reserved, for vers of vfs specific data */ out: IREAD_UNLOCK(ipimap); return rc; }
/*
 * jfs_get_blocks() - map up to @max_blocks contiguous logical blocks of
 * @ip to disk blocks, optionally allocating a new extent.
 *
 * @ip:		inode being mapped
 * @lblock:	starting file logical block number
 * @max_blocks:	maximum number of blocks to map in one call
 * @bh_result:	buffer head to fill in with the mapping
 * @create:	non-zero to allocate backing storage when none is mapped
 *
 * Returns 0 on success or an error code from the extent routines.
 *
 * Locking: for regular file data the inode rdwr lock is taken here
 * (write for create, read otherwise); for special inodes (imap, dmap)
 * and directories the caller already holds the lock, so it is skipped.
 */
static int jfs_get_blocks(struct inode *ip, sector_t lblock,
			  unsigned long max_blocks,
			  struct buffer_head *bh_result, int create)
{
	s64 lblock64 = lblock;
	int no_size_check = 0;
	int rc = 0;
	int take_locks;
	xad_t xad;
	s64 xaddr;
	int xflag;
	s32 xlen;

	/*
	 * If this is a special inode (imap, dmap) or directory,
	 * the lock should already be taken
	 */
	take_locks = ((JFS_IP(ip)->fileset != AGGREGATE_I) &&
		      !S_ISDIR(ip->i_mode));

	/*
	 * Take appropriate lock on inode
	 */
	if (take_locks) {
		if (create)
			IWRITE_LOCK(ip);
		else
			IREAD_LOCK(ip);
	}

	/*
	 * A directory's "data" is the inode index table, but i_size is the
	 * size of the d-tree, so don't check the offset against i_size
	 */
	if (S_ISDIR(ip->i_mode))
		no_size_check = 1;

	/* consult the xtree unless the offset is past i_size (the size
	 * check is bypassed for directories, see above); xtLookup
	 * returns 0 on success with xlen trimmed to the extent found
	 */
	if ((no_size_check ||
	     ((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size)) &&
	    (xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen,
		      no_size_check) == 0) &&
	    xlen) {
		if (xflag & XAD_NOTRECORDED) {
			if (!create)
				/*
				 * Allocated but not recorded, read treats
				 * this as a hole
				 */
				goto unlock;
#ifdef _JFS_4K
			XADoffset(&xad, lblock64);
			XADlength(&xad, xlen);
			XADaddress(&xad, xaddr);
#else				/* _JFS_4K */
			/*
			 * As long as block size = 4K, this isn't a problem.
			 * We should mark the whole page not ABNR, but how
			 * will we know to mark the other blocks BH_New?
			 */
			BUG();
#endif				/* _JFS_4K */
			/* record the extent so the allocation becomes
			 * visible on disk
			 */
			rc = extRecord(ip, &xad);
			if (rc)
				goto unlock;
			set_buffer_new(bh_result);
		}

		map_bh(bh_result, ip->i_sb, xaddr);
		bh_result->b_size = xlen << ip->i_blkbits;
		goto unlock;
	}
	if (!create)
		goto unlock;	/* hole on a read: leave bh unmapped */

	/*
	 * Allocate a new block
	 */
#ifdef _JFS_4K
	/* seed the allocator with a placement hint near the offset */
	if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
		goto unlock;
	rc = extAlloc(ip, max_blocks, lblock64, &xad, FALSE);
	if (rc)
		goto unlock;

	set_buffer_new(bh_result);
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;
#else				/* _JFS_4K */
	/*
	 * We need to do whatever it takes to keep all but the last buffers
	 * in 4K pages - see jfs_write.c
	 */
	BUG();
#endif				/* _JFS_4K */

unlock:
	/*
	 * Release lock on inode (only if we took it above)
	 */
	if (take_locks) {
		if (create)
			IWRITE_UNLOCK(ip);
		else
			IREAD_UNLOCK(ip);
	}
	return rc;
}
/*
 * NAME:	readdir
 *
 * FUNCTION:	read directory according to specifications
 *		in directory search structure
 *
 * PARAMETER:	dvp	- vnode of directory being read
 *		fsfp	- directory search information (pattern, resume
 *			  offset, last matched name)
 *		ubuf	- user's data area
 *		ubytes	- size of user's data area
 *		matchcnt - in: max entries wanted; out: entries returned
 *		level	- level of output struct (OS/2 FileFindBuf level)
 *		eaopp	- pointer to EAOP (for EA query levels)
 *		crp	- credential
 *
 * RETURN:	EINVAL	- zero-size user buffer
 *		ENOTDIR	- if not (or no longer) a directory
 *		ENOMEM	- dirent pool exhausted
 *		ERROR_NO_MORE_FILES / ERROR_BUFFER_OVERFLOW /
 *		ERROR_EAS_DIDNT_FIT, or errors from subroutines
 *
 * note:
 * N.B. directory file offset encodes (directory page number,
 * entry index number), and should NOT be interpreted/modified
 * by caller (lseek()) except that initial offset set to 0.
 *
 * no guarantees can be made that the exact offset
 * requested can be found if directory has been updated
 * by other threads between consecutive readdir()s.
 * transfer length of zero signals start offset beyond eof.
 *
 * unused space in the directory are not returned to the user,
 * i.e., more than requested size may have to be read
 * from directory to fill the user's buffer.
 */
readdir(
	struct vnode	*dvp,		/* inode of directory being read */
	struct fsfd	*fsfp,		/* directory search information */
	char		*ubuf,		/* user's data area */
	uint32		ubytes,		/* size of user's data area */
	uint32		*matchcnt,	/* count of entries returned */
	uint32		level,		/* level of output struct */
	uint32		flags,		/* offsets needed in output? */
	EAOP		*eaopp,		/* pointer to EAOP */
	struct ucred	*crp)
{
	int32	rc = 0;
	int32	ReturnCode = NO_ERROR;
	inode_t	*dip;			/* directory inode */
	inode_t	*ip;			/* object inode */
	uint32	matches;		/* output matches wanted */
	uint32	dtmatches;		/* matches found per dtFind call */
	uint32	position;		/* offsets requested in output */
	uint32	count;			/* output buffer byte count */
	int32	tbytes;			/* byte count in dirent buffer */
	struct dirent	*dbuf;		/* dirent buffer (pool allocated) */
	struct dirent	*dbufp;		/* cursor into dirent buffer */
	uint32	ffhdsize;		/* size of ffbuf header */
	component_t	lastmatch;	/* pointer to last matching entry */
	char	*ffbuf;			/* output buffer pointer */
	char	*nxbuf;			/* output buffer pointer */
	char	*bufp;			/* output buffer pointer */

	MMPHPrereaddir();		/* MMPH Performance Hook */

	/* set state from search structure */
	dip = VP2IP(dvp);
	position = flags & FF_GETPOS;

	/* validate request */
	if (ubytes == 0)
	{
		rc = EINVAL;
		goto readdir_Exit;
	}

	/* continuous read of empty directory ?
	 * (offset of -1 marks end-of-directory from a previous call)
	 */
	if (fsfp->fsd_offset == -1)
	{
		rc = ERROR_NO_MORE_FILES;
		goto readdir_Exit;
	}

	dbuf = (struct dirent *)allocpool(dirent_pool, 0);	// D228565
	if (dbuf == 0)						// D228565
	{
		rc = ENOMEM;
		goto readdir_Exit;
	}

	/* set up variable to manipulate output buffer pointers
	 * based on level.
	 */
	if (level == 1)
		ffhdsize = FFBUFHD;
	else if (level == 11)
		ffhdsize = FFBUFHD3L;
	else if (level < 11)
		ffhdsize = FFBUFHD2;
	else
		ffhdsize = FFBUFHD4L;

	/* each entry is prefixed with its resume offset if requested */
	if (position)
		ffhdsize += sizeof(uint32);

	ffbuf = ubuf;
	count = 0;
	matches = *matchcnt;
	*matchcnt = 0;

	/* outer loop: one dtFind() batch per iteration until the caller's
	 * match quota is reached, the buffer fills, or an error occurs
	 */
	while ((*matchcnt < matches) && (rc == 0))
	{
		IREAD_LOCK(dip);

		/* directory became void when last link was removed */
		if ((dip->i_nlink == 0) || ((dip->i_mode & IFMT) != IFDIR))
		{
			IREAD_UNLOCK(dip);
			freepool(dirent_pool, (caddr_t *)dbuf);
			rc = ENOTDIR;
			goto readdir_Exit;
		}

		/* fill a directory buffer.
		 * read on-disk structure (struct ldtentry_t) and
		 * translate into readdir() structure (struct dirent).
		 */
		tbytes = 0;
		dtmatches = matches - *matchcnt;
		dbufp = dbuf;					// D228565
		rc = dtFind(dip, &fsfp->fsd_pattern, fsfp->fsd_lastmatch,
			    &fsfp->fsd_offset, &dtmatches, PSIZE, &tbytes,
			    dbufp);

		IREAD_UNLOCK(dip);

		if (rc)
		{
			freepool(dirent_pool, (caddr_t *)dbuf);
			goto readdir_Exit;
		}

		/* copy translate buffer to user FileFindBuf buffer */
		while ((*matchcnt < matches) && (ReturnCode == NO_ERROR))
		{
			uint32	namlen;

			/* translation buffer empty? */
			if (tbytes == 0)
				break;

			/* get size of next name */
			namlen = dbufp->d_namlen;

			/* user buffer full?
			 * the +1 here is to allow for the null character
			 * terminating the name string.
			 */
			if ((count + ffhdsize + namlen + 1) > ubytes)
			{
				rc = ERROR_BUFFER_OVERFLOW;
				break;
			}

			/* get the inode for the file */
			ICACHE_LOCK();
			rc = iget(dvp->v_vfsp, dbufp->d_ino, &ip, 0);
			ICACHE_UNLOCK();
			if (rc)
				/* NOTE(review): an iget() failure is
				 * silently skipped (entry omitted from the
				 * results) rather than returned — confirm
				 * this is intended
				 */
				goto try_next;

			nxbuf = ffbuf;

			/* fill in file search info for files that have
			 * the proper attributes; ignore others.
			 */
			rc = get_fileinfo(ip, &nxbuf, ubytes, dbufp->d_name,
					  namlen, fsfp->fsd_havattr, level,
					  eaopp, flags);
			if ((rc == ERROR_BUFFER_OVERFLOW) &&
			    (*matchcnt == 0) &&
			    ((level == FIL_QUERYEASFROMLIST) ||
			     (level == FIL_QUERYEASFROMLISTL)))
			{
				/* Can't fit EA in buffer, try without
				 * getting EA
				 */
				if (level == FIL_QUERYEASFROMLIST)
					level = FIL_QUERYEASIZE;
				else
					level = FIL_QUERYEASIZEL;
				ReturnCode = ERROR_EAS_DIDNT_FIT;
				rc = get_fileinfo(ip, &nxbuf, ubytes,
						  dbufp->d_name, namlen,
						  fsfp->fsd_havattr, level,
						  eaopp, flags);
			}

			/* release the inode */
			jfs_rele(IP2VP(ip));

			if (rc == 0)
			{
				/* set offset if requested */
				if (position)
				{
					rc = KernCopyOut(ffbuf,
							 &dbufp->d_offset,
							 sizeof(int32));
					if (rc)
					{
						/* This is very unlikely to
						 * happen!
						 */
						ASSERT(0);
						break;
					}
				}

				/* update output buffer count */
				count += nxbuf - ffbuf;

				/* move to next entry in output buffer */
				ffbuf = nxbuf;

				/* update match count */
				*matchcnt += 1;
			}
			else if (rc != -1)
				break;

try_next:
			/* rc == -1 indicates no attribute match,
			 * just keep going.
			 */
			rc = 0;

			/* save name for next call setup */
			lastmatch.name = dbufp->d_name;
			lastmatch.namlen = namlen;

			/* update dirent buffer count */
			tbytes -= dbufp->d_reclen;

			/* move to next entry in dirent buffer */
			dbufp = (struct dirent *)
				((caddr_t)dbufp + dbufp->d_reclen);
		}

		/* We don't want to continue if ReturnCode =
		 * ERROR_EAS_DIDNT_FIT
		 */
		if (rc == 0)
			rc = ReturnCode;

		/* set return code for end of directory with no matches */
		if (fsfp->fsd_offset == -1)
			rc = ERROR_NO_MORE_FILES;
		else if ((rc == 0) || (rc == ERROR_EAS_DIDNT_FIT))
		{
			/* save last matching name for next call.
			 * NOTE(review): if dtFind() ever returns zero
			 * entries (tbytes == 0) without setting
			 * fsd_offset to -1, lastmatch is read here
			 * uninitialized — verify dtFind's contract.
			 */
			UniStrncpy(fsfp->fsd_lastmatch,lastmatch.name,
				   lastmatch.namlen);
			fsfp->fsd_lastmatch[lastmatch.namlen] = '\0';
		}
	}

	/* claim success if we return any entries */
	if (*matchcnt != 0)
		rc = ReturnCode;

	freepool(dirent_pool, (caddr_t *)dbuf);

readdir_Exit:
	MMPHPostreaddir();	/* MMPH Performance Hook */

	return rc;
}
/*
 * NAME:	jfs_readlink(vp, uiop, crp)
 *
 * FUNCTION:	read a symbolic link <vp> into <uiop>
 *
 * PARAMETER:	vp	- pointer to the vnode that represents the
 *			  symlink we want to read
 *		uiop	- How much to read and where it goes
 *		crp	- credential
 *
 * RETURN:	EINVAL	- if not a symbolic link
 *		ERANGE	- caller's buffer smaller than the target path
 *		errors from subroutines (bmRead, uiomove)
 */
jfs_readlink(
	struct vnode	*vp,		/* symlink vnode */
	struct uio	*uiop,
	struct ucred	*crp)
{
	int32	rc = 0;
	inode_t	*ip = VP2IP(vp);
	int32	cnt;
	jbuf_t	*bp;

	NOISE(1,("jfs_readlink: ip:0x%08x\n", ip));

	if (vp->v_vntype != VLNK)
		return EINVAL;

	/* hold the inode read-locked so i_size and the target path
	 * cannot change while we copy them out
	 */
	IREAD_LOCK(ip);

	/* validate the buffer size vs target path name size
	 * (AES requires ERANGE if the link name won't fit)
	 */
	if (ip->i_size > uiop->uio_resid)
	{
		rc = ERANGE;
		goto out;
	}

	/*
	 * read the target path name
	 */
	if (ip->i_size <= IDATASIZE)
	{
		/*
		 * fast symbolic link
		 *
		 * read target path name inline from on-disk inode
		 */
		cnt = MIN(ip->i_size, uiop->uio_iov->iov_len);
		rc = uiomove(ip->i_fastsymlink, cnt, UIO_READ, uiop);
	}
	else
	{
		/*
		 * read target path name from a single extent
		 *
		 * target path name <= PATH_MAX < buffer page size
		 *
		 * even though the data of symlink object (target
		 * path name) is treated as non-journaled user data,
		 * it is read/written thru buffer cache for performance.
		 */
		if (rc = bmRead(ip, 0, ip->i_size, bmREAD_PAGE, &bp))
			goto out;

		cnt = MIN(ip->i_size, uiop->uio_iov->iov_len);
		rc = uiomove(bp->b_bdata, cnt, UIO_READ, uiop);

		/* release the cache buffer after the copy-out */
		bmRelease(bp);
	}

out:
	IREAD_UNLOCK(ip);
	NOISE(1,("jfs_readlink: rc:%d\n", rc));
	return rc;
}
/*
 * NAME:	dbFree()
 *
 * FUNCTION:	free the specified block range from the working block
 *		allocation map.
 *
 *		the blocks will be freed from the working map one dmap
 *		at a time.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode;
 *	blkno	- starting block number to be freed.
 *	nblocks	- number of blocks to be freed.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 */
int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
{
	struct metapage *mp;
	struct dmap *dp;
	int nb, rc;
	s64 lblkno, rem;
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
	struct super_block *sb = ipbmap->i_sb;

	/* read lock on the bmap inode serializes against extendfs;
	 * per-dmap updates are protected inside dbFreeDmap
	 */
	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* block to be freed better be within the mapsize. */
	if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
		IREAD_UNLOCK(ipbmap);
		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
		       (unsigned long long) blkno,
		       (unsigned long long) nblocks);
		jfs_error(ip->i_sb,
			  "dbFree: block to be freed is outside the map");
		return -EIO;
	}

	/**
	 * TRIM the blocks, when mounted with discard option
	 * (only ranges at least minblks_trim long are discarded)
	 */
	if (JFS_SBI(sb)->flag & JFS_DISCARD)
		if (JFS_SBI(sb)->minblks_trim <= nblocks)
			jfs_issue_discard(ipbmap, blkno, nblocks);

	/*
	 * free the blocks a dmap at a time.
	 */
	mp = NULL;
	for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
		/* release previous dmap if any */
		if (mp) {
			write_metapage(mp);
		}

		/* get the buffer for the current dmap. */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			IREAD_UNLOCK(ipbmap);
			return -EIO;
		}
		dp = (struct dmap *) mp->data;

		/* determine the number of blocks to be freed from
		 * this dmap (clamped so we never cross a dmap boundary).
		 */
		nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));

		/* free the blocks. */
		if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
			jfs_error(ip->i_sb, "dbFree: error in block map\n");
			release_metapage(mp);
			IREAD_UNLOCK(ipbmap);
			return (rc);
		}
	}

	/* write the last buffer. */
	write_metapage(mp);

	IREAD_UNLOCK(ipbmap);

	return (0);
}