/*
 * jfs_get_blocks: map up to max_blocks logical file blocks starting at
 * lblock into bh_result for the generic block I/O layer.
 *
 * ip		- inode whose blocks are being mapped
 * lblock	- starting logical block number within the file
 * max_blocks	- maximum number of contiguous blocks to map in one call
 * bh_result	- buffer head receiving the physical mapping
 * create	- non-zero on the write path: allocate / record blocks as needed
 *
 * Returns 0 on success (bh_result mapped, or left unmapped to signal a
 * hole on read) or an error code from the extent layer.
 */
static int jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, struct buffer_head *bh_result, int create)
{
	s64 lblock64 = lblock;
	int no_size_check = 0;
	int rc = 0;
	int take_locks;
	xad_t xad;
	s64 xaddr;
	int xflag;
	s32 xlen;

	/*
	 * If this is a special inode (imap, dmap) or directory,
	 * the lock should already be taken
	 */
	take_locks = ((JFS_IP(ip)->fileset != AGGREGATE_I) && !S_ISDIR(ip->i_mode));

	/*
	 * Take appropriate lock on inode
	 */
	if (take_locks) {
		if (create)
			IWRITE_LOCK(ip);
		else
			IREAD_LOCK(ip);
	}

	/*
	 * A directory's "data" is the inode index table, but i_size is the
	 * size of the d-tree, so don't check the offset against i_size
	 */
	if (S_ISDIR(ip->i_mode))
		no_size_check = 1;

	/*
	 * Look up an existing extent covering lblock64.  xlen != 0 means
	 * xtLookup found a mapped extent of xlen blocks at xaddr.
	 */
	if ((no_size_check ||
	     ((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size)) &&
	    (xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen, no_size_check) == 0) &&
	    xlen) {
		if (xflag & XAD_NOTRECORDED) {
			if (!create)
				/*
				 * Allocated but not recorded, read treats
				 * this as a hole
				 */
				goto unlock;
#ifdef _JFS_4K
			/* build an xad for the extent so it can be recorded */
			XADoffset(&xad, lblock64);
			XADlength(&xad, xlen);
			XADaddress(&xad, xaddr);
#else				/* _JFS_4K */
			/*
			 * As long as block size = 4K, this isn't a problem.
			 * We should mark the whole page not ABNR, but how
			 * will we know to mark the other blocks BH_New?
			 */
			BUG();
#endif				/* _JFS_4K */
			rc = extRecord(ip, &xad);
			if (rc)
				goto unlock;
			set_buffer_new(bh_result);
		}

		/* hand the physical mapping back to the caller */
		map_bh(bh_result, ip->i_sb, xaddr);
		bh_result->b_size = xlen << ip->i_blkbits;
		goto unlock;
	}
	/* no extent found: a read sees a hole, leave bh_result unmapped */
	if (!create)
		goto unlock;

	/*
	 * Allocate a new block
	 */
#ifdef _JFS_4K
	/* seed the allocation hint from neighboring extents */
	if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
		goto unlock;
	rc = extAlloc(ip, max_blocks, lblock64, &xad, FALSE);
	if (rc)
		goto unlock;

	set_buffer_new(bh_result);
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;
#else				/* _JFS_4K */
	/*
	 * We need to do whatever it takes to keep all but the last buffers
	 * in 4K pages - see jfs_write.c
	 */
	BUG();
#endif				/* _JFS_4K */

      unlock:
	/*
	 * Release lock on inode
	 */
	if (take_locks) {
		if (create)
			IWRITE_UNLOCK(ip);
		else
			IREAD_UNLOCK(ip);
	}
	return rc;
}
/*
 * jfs_get_block: map the logical file block lblock into bh_result for the
 * generic block I/O layer.  The requested mapping length is taken from
 * bh_result->b_size; the length actually mapped is written back to b_size.
 *
 * ip		- inode whose block is being mapped
 * lblock	- logical block number within the file
 * bh_result	- buffer head: b_size gives the requested span, and receives
 *		  the physical mapping on return
 * create	- non-zero on the write path: allocate / record blocks as needed
 *
 * Locking: takes IWRITE_LOCK/IREAD_LOCK (class RDWRLOCK_NORMAL) here,
 * unlike jfs_get_blocks-style callers where special inodes are pre-locked.
 *
 * Returns 0 on success (bh_result mapped, or left unmapped to signal a
 * hole on read) or an error code from the extent layer.
 */
int jfs_get_block(struct inode *ip, sector_t lblock, struct buffer_head *bh_result, int create)
{
	s64 lblock64 = lblock;
	int rc = 0;
	xad_t xad;
	s64 xaddr;
	int xflag;
	/* requested length in filesystem blocks, derived from b_size */
	s32 xlen = bh_result->b_size >> ip->i_blkbits;

	/*
	 * Take appropriate lock on inode
	 */
	if (create)
		IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	else
		IREAD_LOCK(ip, RDWRLOCK_NORMAL);

	/*
	 * Look up an existing extent; a non-zero xaddr means xtLookup found
	 * a mapped extent (xlen is updated to the length actually covered).
	 */
	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
	    (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
	    xaddr) {
		if (xflag & XAD_NOTRECORDED) {
			if (!create)
				/*
				 * Allocated but not recorded, read treats
				 * this as a hole
				 */
				goto unlock;
#ifdef _JFS_4K
			/* build an xad for the extent so it can be recorded */
			XADoffset(&xad, lblock64);
			XADlength(&xad, xlen);
			XADaddress(&xad, xaddr);
#else				/* _JFS_4K */
			/*
			 * As long as block size = 4K, this isn't a problem.
			 * We should mark the whole page not ABNR, but how
			 * will we know to mark the other blocks BH_New?
			 */
			BUG();
#endif				/* _JFS_4K */
			rc = extRecord(ip, &xad);
			if (rc)
				goto unlock;
			set_buffer_new(bh_result);
		}

		/* hand the physical mapping back to the caller */
		map_bh(bh_result, ip->i_sb, xaddr);
		bh_result->b_size = xlen << ip->i_blkbits;
		goto unlock;
	}
	/* no extent found: a read sees a hole, leave bh_result unmapped */
	if (!create)
		goto unlock;

	/*
	 * Allocate a new block
	 */
#ifdef _JFS_4K
	/* seed the allocation hint from neighboring extents */
	if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
		goto unlock;
	rc = extAlloc(ip, xlen, lblock64, &xad, false);
	if (rc)
		goto unlock;

	set_buffer_new(bh_result);
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;
#else				/* _JFS_4K */
	/*
	 * We need to do whatever it takes to keep all but the last buffers
	 * in 4K pages - see jfs_write.c
	 */
	BUG();
#endif				/* _JFS_4K */

      unlock:
	/*
	 * Release lock on inode
	 */
	if (create)
		IWRITE_UNLOCK(ip);
	else
		IREAD_UNLOCK(ip);

	return rc;
}
/*
 * NAME:	jfs_defragfs()
 *
 * FUNCTION:	relocate specified extent for defragfs()
 *		optionally commiting the operation.
 *
 * PARAMETERS:	pData	 - pointer to user buffer containing a defragfs_t
 *			   parameter list (device, inode id, extent coords)
 *		lenData	 - length of that buffer
 *		pbufsize - pointer of buffer length (unused here)
 *
 * RETURNS:	0 on success; EINVAL / EROFS / ESTALE / ENOSPC or an error
 *		from the copy-in, transaction, or relocation layers.
 *
 * NOTE(review): on the DEFRAGFS_RELOCATE paths, a txCommit()/xtRelocate()/
 * dtRelocate() failure jumps to out2 without calling txEnd(tid) — looks
 * like a leaked transaction id; confirm whether the tx layer reclaims it.
 */
int32 jfs_defragfs(
	char	*pData,		/* pointer to buffer containing plist */
	uint32	lenData,	/* length of buffer */
	uint16	*pbufsize)	/* pointer of buffer length */
{
	int32	rc = 0;
	defragfs_t	pList, *p = &pList;
	uint32	xtype;
	int64	offset, xoff, oxaddr, nxaddr;
	int32	xlen;
	inode_t	*ipmnt, *ipimap, *ipbmap;
	inode_t	*ip = NULL;
	xad_t	oxad;
	pxd_t	opxd;
	mode_t	imode;
	int32	tid;
	inode_t	*iplist[1];
	struct vfs	*vfsp;

	/* copy the parameter list in from user space */
	if (rc = KernCopyIn(&pList, pData, lenData))
		return rc;

	/* get the 'mount' inode */
	for (vfsp = vfs_anchor; vfsp != NULL; vfsp = vfsp->vfs_next)
		if (vfsp->vfs_vpfsi->vpi_drive == pList.dev)
			break;
	if (vfsp == NULL)
		return EINVAL;

	xtype = pList.flag;

	/* sync at start of defragfs ? */
	if (xtype & DEFRAGFS_SYNC) {
		jEVENT(0,("jfs_defragfs: DEFRAGFS_SYNC\n"));

		/* nothing to flush on a read-only or pre-accept volume */
		if ((vfsp->vfs_flag & VFS_READONLY) ||
		    (vfsp->vfs_flag & VFS_ACCEPT))
			return 0;

		ipimap = (inode_t *)vfsp->vfs_data;
		ipmnt = ipimap->i_ipmnt;
		ipbmap = ipmnt->i_ipbmap;

		/* sync the file system */
		iSyncFS(vfsp);

		/* write dirty pages of imap */
		diSync(ipimap);

		/* write dirty pages of bmap */
		dbSync(ipbmap);

		return 0;
	}
	else if (!(xtype & DEFRAGFS_RELOCATE))
		return EINVAL;

	/* relocation requires a writable, accepted volume */
	if (vfsp->vfs_flag & VFS_READONLY)
		return EROFS;
	if (vfsp->vfs_flag & VFS_ACCEPT)
		return EINVAL;

	/* get the relocation parameter */
	xoff = pList.xoff;
	oxaddr = pList.old_xaddr;
	nxaddr = pList.new_xaddr;
	xlen = pList.xlen;

	jEVENT(0,("jfs_defragfs: i:%d xtype:0x%08x xoff:%lld xlen:%d xaddr:%lld:%lld\n",
		pList.ino, xtype, xoff, xlen, oxaddr, nxaddr));

	/* get the object inode if it exist */
	ICACHE_LOCK();
	rc = iget(vfsp, pList.ino, &ip, 0);
	ICACHE_UNLOCK();
	if(rc) {
		jEVENT(0,("jfs_defragfs: stale target object.\n"));
		rc = ESTALE;	/* stale object ENOENT */
		goto out1;
	}

	IWRITE_LOCK(ip);

	/*
	 * validate inode: the caller's snapshot (gen/fileset/inostamp) must
	 * still match, i.e. the inode was not deleted/reused meanwhile
	 */
	if (ip->i_nlink == 0 ||
	    ip->i_gen != pList.gen ||
	    ip->i_fileset != pList.fileset ||
	    ip->i_inostamp != pList.inostamp) {
		jEVENT(0,("jfs_defragfs: stale target object.\n"));
		rc = ESTALE;	/* stale object ENOENT */
		goto out1;
	}

	/* validate object type: regular file or directory */
	imode = ip->i_mode & IFMT;
	switch(imode) {
	case IFREG:
	case IFDIR:
		break;
	default:
		rc = ESTALE;	/* invalid object type ENOENT */
		goto out1;
	}

	/*
	 * try to allocate new destination extent
	 */
	if (rc = dbAllocExact(ip, nxaddr, xlen)) {
		jEVENT(0,("jfs_defragfs: stale destination extent.\n"));
		rc = ENOSPC;
		goto out1;
	}

	iBindCache(ip);

	/*
	 * regular file:
	 */
	if (imode == IFREG) {
		/*
		 * automatic commit before and after each relocation
		 * may be skipped after more experience;
		 */
		/*
		 * commit any update before relocation
		 */
		if (ip->i_flag & IUPD) {
			ip->i_flag |= IFSYNC;
			txBegin(ip->i_ipmnt, &tid, 0);
			iplist[0] = ip;
			rc = txCommit(tid, 1, &iplist[0], 0);
			if (rc)
				goto out2;
			txEnd(tid);
		}

		/*
		 * relocate either xtpage or data extent
		 */
		txBegin(ip->i_ipmnt, &tid, 0);

		/* source extent xad */
		XADoffset(&oxad, xoff);
		XADaddress(&oxad, oxaddr);
		XADlength(&oxad, xlen);

		rc = xtRelocate(tid, ip, &oxad, nxaddr, xtype);
		if (rc)
			goto out2;

		iplist[0] = ip;
		rc = txCommit(tid, 1, &iplist[0], 0);
		if (rc)
			goto out2;

		txEnd(tid);
		goto out1;
	}
	/*
	 * directory:
	 */
	else /* IFDIR */ {
		/*
		 * relocate dtpage
		 */
		txBegin(ip->i_ipmnt, &tid, 0);

		/* source extent pxd */
		PXDaddress(&opxd, oxaddr);
		PXDlength(&opxd, xlen);

		rc = dtRelocate(tid, ip, xoff, &opxd, nxaddr);
		if (rc)
			goto out2;

		iplist[0] = ip;
		rc = txCommit(tid, 1, &iplist[0], 0);
		if (rc)
			goto out2;

		txEnd(tid);
		goto out1;
	}

out2:
	/* relocation failed: release the destination extent allocated above */
	dbFree(ip, nxaddr, xlen) ;

out1:
	/* drop lock and reference on the target inode, if we got one */
	if (ip) {
		IWRITE_UNLOCK(ip);
		ICACHE_LOCK();
		iput(ip, NULL);
		ICACHE_UNLOCK();
	}

	jEVENT(0,("jfs_defragfs: rc=%d\n", rc));
	return (rc);
}