/*
 * Sanity-check the shortform (inline) directory data held in the
 * literal area of an on-disk inode.
 *
 * Returns 0 when the inode is not a local-format directory, or when
 * every check passes; returns 1 when any on-disk field looks corrupt
 * (a warning is logged for size/count/namelen problems).
 */
static int
xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp, xfs_dinode_t *dp)
{
	xfs_ino_t		ino;
	int			namelen_sum;
	int			count;
	xfs_dir_shortform_t	*sf;
	xfs_dir_sf_entry_t	*sfe;
	int			i;

	/* Only local-format directories carry shortform data. */
	if ((INT_GET(dp->di_core.di_mode, ARCH_CONVERT) & IFMT) != IFDIR) {
		return 0;
	}
	if (INT_GET(dp->di_core.di_format, ARCH_CONVERT) !=
	    XFS_DINODE_FMT_LOCAL) {
		return 0;
	}
	/* The inline data must at least hold the shortform header. */
	if (INT_GET(dp->di_core.di_size, ARCH_CONVERT) < sizeof(sf->hdr)) {
		xfs_fs_cmn_err(CE_WARN, mp,
			"Invalid shortform size: dp 0x%p\n", dp);
		return 1;
	}
	sf = (xfs_dir_shortform_t *)(&dp->di_u.di_dirsf);
	ino = XFS_GET_DIR_INO_ARCH(mp, sf->hdr.parent, ARCH_CONVERT);
	if (xfs_dir_ino_validate(mp, ino))
		return 1;

	/*
	 * Each entry consumes roughly 10 bytes at minimum, so a count
	 * larger than XFS_LITINO(mp)/10 cannot fit in the literal area.
	 */
	count = sf->hdr.count;
	if ((count < 0) || ((count * 10) > XFS_LITINO(mp))) {
		xfs_fs_cmn_err(CE_WARN, mp,
			"Invalid shortform count: dp 0x%p\n", dp);
		return(1);
	}
	if (count == 0) {
		return 0;
	}

	/*
	 * Walk the entries, validating each entry's inode number and
	 * bounding both the per-entry and cumulative name lengths.
	 */
	namelen_sum = 0;
	sfe = &sf->list[0];
	for (i = sf->hdr.count - 1; i >= 0; i--) {
		ino = XFS_GET_DIR_INO_ARCH(mp, sfe->inumber, ARCH_CONVERT);
		/*
		 * Fix: the return value was previously discarded here, so
		 * a corrupt entry inode number was silently accepted even
		 * though the parent-ino check above rejects the same case.
		 */
		if (xfs_dir_ino_validate(mp, ino))
			return 1;
		if (sfe->namelen >= XFS_LITINO(mp)) {
			xfs_fs_cmn_err(CE_WARN, mp,
				"Invalid shortform namelen: dp 0x%p\n", dp);
			return 1;
		}
		namelen_sum += sfe->namelen;
		sfe = XFS_DIR_SF_NEXTENTRY(sfe);
	}
	if (namelen_sum >= XFS_LITINO(mp)) {
		xfs_fs_cmn_err(CE_WARN, mp,
			"Invalid shortform namelen: dp 0x%p\n", dp);
		return 1;
	}
	return 0;
}
/* ARGSUSED */
/*
 * Read the on-disk image of a dquot into dqp->q_core, then initialize
 * the incore reservation counters and the dquot's log item from it.
 *
 * tp    - transaction to read the dquot buffer under.
 * id    - expected dquot id; asserted against the on-disk copy.
 * dqp   - incore dquot to fill in (already knows its own type).
 * flags - passed through to xfs_qm_dqtobp().
 *
 * Returns 0 on success, or the error from xfs_qm_dqtobp().
 */
STATIC int
xfs_qm_dqread(
	xfs_trans_t	*tp,
	xfs_dqid_t	id,
	xfs_dquot_t	*dqp,	/* dquot to get filled in */
	uint		flags)
{
	xfs_disk_dquot_t *ddqp;
	xfs_buf_t	*bp;
	int		error;

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	xfs_dqtrace_entry(dqp, "DQREAD");
	if ((error = xfs_qm_dqtobp(tp, dqp, &ddqp, &bp, flags))) {
		return (error);
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	ASSERT(INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id);
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add everytime.
	 */
	dqp->q_res_bcount = INT_GET(ddqp->d_bcount, ARCH_CONVERT);
	dqp->q_res_icount = INT_GET(ddqp->d_icount, ARCH_CONVERT);
	dqp->q_res_rtbcount = INT_GET(ddqp->d_rtbcount, ARCH_CONVERT);

	/* Mark the buf so that this will stay incore a little longer */
	XFS_BUF_SET_VTYPE_REF(bp, B_FS_DQUOT, XFS_DQUOT_REF);

	/*
	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
	 * So we need to release with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
	xfs_trans_brelse(tp, bp);
	return (error);
}
/*
 * Number of keys in a freespace-by-block btree block.  Leaf blocks
 * (level 0) carry records rather than keys, so report zero for them;
 * interior blocks report their record count.
 */
static int
bnobt_key_count(
	void	*obj,
	int	startoff)
{
	xfs_alloc_block_t	*blk = obj;

	ASSERT(startoff == 0);
	return INT_GET(blk->bb_level, ARCH_CONVERT) != 0 ?
		INT_GET(blk->bb_numrecs, ARCH_CONVERT) : 0;
}
/*
 * Entry count of a directory leaf block's namelist; zero unless the
 * block carries the directory leaf magic number.
 */
/*ARGSUSED*/
static int
dir_leaf_namelist_count(
	void	*obj,
	int	startoff)
{
	xfs_dir_leafblock_t	*leaf = obj;

	ASSERT(startoff == 0);
	if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC)
		return INT_GET(leaf->hdr.count, ARCH_CONVERT);
	return 0;
}
/*
 * Entry count of a directory da-btree interior node; zero unless the
 * block carries the da-node magic number.
 */
/*ARGSUSED*/
static int
dir_node_btree_count(
	void	*obj,
	int	startoff)
{
	xfs_da_intnode_t	*node = obj;

	ASSERT(startoff == 0);		/* this is a base structure */
	if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC)
		return INT_GET(node->hdr.count, ARCH_CONVERT);
	return 0;
}
/*
 * uuid_getnodeuniq - obtain the node unique fields of a UUID.
 *
 * This is not in any way a standard or condoned UUID function; it is
 * just something that's needed for user-level file handles.
 *
 * On IRIX this function assumes big-endian fields within the uuid, so
 * INT_GET is used to produce the identical result on little-endian
 * systems.
 */
void
uuid_getnodeuniq(uuid_t *uuid, int fsid[2])
{
	char	*raw = (char *)uuid;

	fsid[0] = (INT_GET(*(u_int16_t *)(raw + 8), ARCH_CONVERT) << 16) +
		   INT_GET(*(u_int16_t *)(raw + 4), ARCH_CONVERT);
	fsid[1] = INT_GET(*(u_int32_t *)(raw), ARCH_CONVERT);
}
/*
 * Compare an incore dquot's counters and timer state against the
 * quotacheck-computed values in d.  Every mismatch is reported via
 * xfs_qm_dqtest_failed()/cmn_err() and counted.  Returns the number
 * of failures found (0 means the dquot checked out clean).
 */
STATIC int
xfs_dqtest_cmp2(
	xfs_dqtest_t	*d,
	xfs_dquot_t	*dqp)
{
	int err = 0;

	/* inode counts must agree */
	if (INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) != d->d_icount) {
		xfs_qm_dqtest_failed(d, dqp, "icount mismatch",
			INT_GET(dqp->q_core.d_icount, ARCH_CONVERT),
			d->d_icount, 0);
		err++;
	}
	/* block counts must agree */
	if (INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT) != d->d_bcount) {
		xfs_qm_dqtest_failed(d, dqp, "bcount mismatch",
			INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT),
			d->d_bcount, 0);
		err++;
	}
	/*
	 * A dquot at or over its block soft limit must have its block
	 * timer running (the id-0 default dquot is exempt).
	 */
	if (INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT) &&
	    INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT) >=
	    INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT)) {
		if (INT_ISZERO(dqp->q_core.d_btimer, ARCH_CONVERT) &&
		    !INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT)) {
			cmn_err(CE_DEBUG,
				"%d [%s] [0x%p] BLK TIMER NOT STARTED",
				d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
			err++;
		}
	}
	/* same check for the inode soft limit timer */
	if (INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT) &&
	    INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >=
	    INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT)) {
		if (INT_ISZERO(dqp->q_core.d_itimer, ARCH_CONVERT) &&
		    !INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT)) {
			cmn_err(CE_DEBUG,
				"%d [%s] [0x%p] INO TIMER NOT STARTED",
				d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
			err++;
		}
	}
#ifdef QUOTADEBUG
	if (!err) {
		cmn_err(CE_DEBUG, "%d [%s] [0x%p] qchecked",
			d->d_id, XFS_QM_ISUDQ(d) ? "USR" : "GRP", d->q_mount);
	}
#endif
	return (err);
}
/*
 * Look up a name in a leaf directory structure, replace the inode number.
 * This is the external routine.
 *
 * Reads the single leaf block at directory offset 0, finds the entry
 * named in args, and overwrites its stored inode number with
 * args->inumber, logging only the modified field.  Returns 0 on a
 * successful replace or the lookup/read error otherwise.
 */
STATIC int
xfs_dir_leaf_replace(xfs_da_args_t *args)
{
	int index, retval;
	xfs_dabuf_t *bp;
	xfs_ino_t inum;
	xfs_dir_leafblock_t *leaf;
	xfs_dir_leaf_entry_t *entry;
	xfs_dir_leaf_name_t *namest;

	inum = args->inumber;
	/* the whole directory lives in one leaf block at offset 0 */
	retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
				 XFS_DATA_FORK);
	if (retval)
		return(retval);
	ASSERT(bp != NULL);
	retval = xfs_dir_leaf_lookup_int(bp, args, &index);
	if (retval == EEXIST) {
		leaf = bp->data;
		entry = &leaf->entries[index];
		namest = XFS_DIR_LEAF_NAMESTRUCT(leaf,
			INT_GET(entry->nameidx, ARCH_CONVERT));
		/* XXX - replace assert? */
		XFS_DIR_SF_PUT_DIRINO_ARCH(&inum, &namest->inumber,
			ARCH_CONVERT);
		/* log only the changed inumber field, not the whole block */
		xfs_da_log_buf(args->trans, bp,
		    XFS_DA_LOGRANGE(leaf, namest, sizeof(namest->inumber)));
		xfs_da_buf_done(bp);
		retval = 0;
	} else
		xfs_da_brelse(args->trans, bp);
	return(retval);
}
/* ARGSUSED */
/*
 * Record a dquot ktrace entry: the dquot's reference count, flags,
 * reservation and on-disk counters plus limits, the caller
 * (func/retaddr), the current pid, and - when an inode is supplied -
 * that inode's number and attached user dquot.
 */
void
xfs_dqtrace_entry__(
	xfs_dquot_t	*dqp,
	char		*func,
	void		*retaddr,
	xfs_inode_t	*ip)
{
	xfs_dquot_t	*udqp = NULL;
	int		ino = 0;  /* fix: was traced uninitialized when ip == NULL */

	ASSERT(dqp->q_trace);
	if (ip) {
		ino = ip->i_ino;
		udqp = ip->i_udquot;
	}
	ktrace_enter(dqp->q_trace,
		     (void *)(__psint_t)DQUOT_KTRACE_ENTRY,
		     (void *)func,
		     (void *)(__psint_t)dqp->q_nrefs,
		     (void *)(__psint_t)dqp->dq_flags,
		     (void *)(__psint_t)dqp->q_res_bcount,
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_bcount,
						ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_icount,
						ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_blk_hardlimit,
						ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_blk_softlimit,
						ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_ino_hardlimit,
						ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_ino_softlimit,
						ARCH_CONVERT),
		     (void *)(__psint_t)INT_GET(dqp->q_core.d_id,
						ARCH_CONVERT), /* 11 */
		     (void *)(__psint_t)current_pid(),
		     (void *)(__psint_t)ino,
		     (void *)(__psint_t)retaddr,
		     (void *)(__psint_t)udqp);
	return;
}
/*
 * Look up a filename in an int directory, replace the inode number.
 * Use an internal routine to actually do the lookup.
 *
 * Walks the da-btree to the leaf holding the name from args, rewrites
 * the stored inode number with args->inumber (logging only that
 * field), then releases every buffer still held on the lookup path.
 * Returns 0 on a successful replace, or the lookup error.
 */
STATIC int
xfs_dir_node_replace(xfs_da_args_t *args)
{
	xfs_da_state_t *state;
	xfs_da_state_blk_t *blk;
	xfs_dir_leafblock_t *leaf;
	xfs_dir_leaf_entry_t *entry;
	xfs_dir_leaf_name_t *namest;
	xfs_ino_t inum;
	int retval, error, i;
	xfs_dabuf_t *bp;

	/* set up the da-btree lookup state */
	state = xfs_da_state_alloc();
	state->args = args;
	state->mp = args->dp->i_mount;
	state->blocksize = state->mp->m_sb.sb_blocksize;
	inum = args->inumber;

	/*
	 * Search to see if name exists,
	 * and get back a pointer to it.
	 */
	error = xfs_da_node_lookup_int(state, &retval);
	if (error) {
		retval = error;
	}

	if (retval == EEXIST) {
		/* bottom of the path is the leaf holding the entry */
		blk = &state->path.blk[state->path.active - 1];
		ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC);
		bp = blk->bp;
		leaf = bp->data;
		entry = &leaf->entries[blk->index];
		namest = XFS_DIR_LEAF_NAMESTRUCT(leaf,
			INT_GET(entry->nameidx, ARCH_CONVERT));
		/* XXX - replace assert ? */
		XFS_DIR_SF_PUT_DIRINO_ARCH(&inum, &namest->inumber,
			ARCH_CONVERT);
		/* log only the changed inumber field */
		xfs_da_log_buf(args->trans, bp,
		    XFS_DA_LOGRANGE(leaf, namest, sizeof(namest->inumber)));
		xfs_da_buf_done(bp);
		blk->bp = NULL;
		retval = 0;
	} else {
		/* name not found: drop the leaf buffer explicitly */
		i = state->path.active - 1;
		xfs_da_brelse(args->trans, state->path.blk[i].bp);
		state->path.blk[i].bp = NULL;
	}
	/* release any interior-node buffers still held on the path */
	for (i = 0; i < state->path.active - 1; i++) {
		xfs_da_brelse(args->trans, state->path.blk[i].bp);
		state->path.blk[i].bp = NULL;
	}

	xfs_da_state_free(state);
	return(retval);
}
/*
 * A directory leaf block has exactly one header: report 1 when the
 * magic number identifies a leaf block, otherwise 0.
 */
/*ARGSUSED*/
static int
dir_leaf_hdr_count(
	void	*obj,
	int	startoff)
{
	xfs_dir_leafblock_t	*leaf = obj;

	ASSERT(startoff == 0);
	return INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) ==
	       XFS_DIR_LEAF_MAGIC;
}
/*
 * Report a quotacheck mismatch and bump the global failure counter.
 * When error is non-zero an error-style message is emitted; otherwise
 * the mismatching pair (a, b) is printed with the reason.  Both the
 * test record and (if supplied) the incore dquot are dumped.
 */
STATIC void
xfs_qm_dqtest_failed(
	xfs_dqtest_t	*d,
	xfs_dquot_t	*dqp,
	char		*reason,
	xfs_qcnt_t	a,
	xfs_qcnt_t	b,
	int		error)
{
	qmtest_nfails++;
	if (error) {
		cmn_err(CE_DEBUG, "quotacheck failed id=%d, err=%d\nreason: %s",
			INT_GET(d->d_id, ARCH_CONVERT), error, reason);
	} else {
		cmn_err(CE_DEBUG, "quotacheck failed id=%d (%s) [%d != %d]",
			INT_GET(d->d_id, ARCH_CONVERT), reason,
			(int)a, (int)b);
	}
	xfs_qm_dqtest_print(d);
	if (dqp)
		xfs_qm_dqprint(dqp);
}
/*
 * A da-btree interior node has exactly one header: report 1 when the
 * magic number identifies a da node, otherwise 0.
 */
/*ARGSUSED*/
static int
dir_node_hdr_count(
	void	*obj,
	int	startoff)
{
	xfs_da_intnode_t	*node = obj;

	ASSERT(startoff == 0);
	return INT_GET(node->hdr.info.magic, ARCH_CONVERT) ==
	       XFS_DA_NODE_MAGIC;
}
static int dir_leaf_name_count( void *obj, int startoff) { xfs_dir_leafblock_t *block; xfs_dir_leaf_entry_t *e; int i; int off; ASSERT(bitoffs(startoff) == 0); off = byteize(startoff); block = obj; if (INT_GET(block->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) return 0; for (i = 0; i < INT_GET(block->hdr.count, ARCH_CONVERT); i++) { e = &block->entries[i]; if (INT_GET(e->nameidx, ARCH_CONVERT) == off) return e->namelen; } return 0; }
/*
 * Lock one or two dquots.  When both are supplied they are locked in
 * ascending id order, which gives all callers a single global lock
 * ordering and so prevents ABBA deadlocks.
 */
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 == NULL) {
		if (d2)
			xfs_dqlock(d2);
		return;
	}
	if (d2 == NULL) {
		xfs_dqlock(d1);
		return;
	}
	ASSERT(d1 != d2);
	if (INT_GET(d1->q_core.d_id, ARCH_CONVERT) >
	    INT_GET(d2->q_core.d_id, ARCH_CONVERT)) {
		xfs_dqlock(d2);
		xfs_dqlock(d1);
	} else {
		xfs_dqlock(d1);
		xfs_dqlock(d2);
	}
}
/* global_hdr_checksum_check - check the global media file header checksum.
 * utility function for use by drive-specific strategies.
 * The header's 32-bit words must sum to zero (the stored checksum is
 * the two's complement of the rest).
 * returns BOOL_TRUE if ok, BOOL_FALSE if bad
 */
bool_t
global_hdr_checksum_check(global_hdr_t *hdrp)
{
	u_int32_t	*wordp = (u_int32_t *)&hdrp[0];
	u_int32_t	*stopp = (u_int32_t *)&hdrp[1];
	u_int32_t	sum = 0;

	while (wordp < stopp) {
		sum += INT_GET(*wordp, ARCH_CONVERT);
		wordp++;
	}
	return sum == 0 ? BOOL_TRUE : BOOL_FALSE;
}
/*
 * Bit offset of the name area for entry idx in a directory leaf block.
 */
/*ARGSUSED*/
static int
dir_leaf_namelist_offset(
	void	*obj,
	int	startoff,
	int	idx)
{
	xfs_dir_leafblock_t	*leaf = obj;
	xfs_dir_leaf_entry_t	*ent;

	ASSERT(startoff == 0);
	ent = &leaf->entries[idx];
	return bitize(INT_GET(ent->nameidx, ARCH_CONVERT));
}
/*
 * Remove a name from the leaf directory structure
 * This is the external routine.
 *
 * Reads the single leaf block at directory offset 0, removes the entry
 * named in args if present, and reports the remaining entry count and
 * total name bytes via *count / *totallen so the caller can decide
 * whether the directory should shrink to shortform.  Returns 0 on a
 * successful remove, or the lookup/read error.
 */
STATIC int
xfs_dir_leaf_removename(xfs_da_args_t *args, int *count, int *totallen)
{
	xfs_dir_leafblock_t *leaf;
	int index, retval;
	xfs_dabuf_t *bp;

	/* the whole directory lives in one leaf block at offset 0 */
	retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
				 XFS_DATA_FORK);
	if (retval)
		return(retval);
	ASSERT(bp != NULL);
	leaf = bp->data;
	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
						== XFS_DIR_LEAF_MAGIC);
	retval = xfs_dir_leaf_lookup_int(bp, args, &index);
	if (retval == EEXIST) {
		(void)xfs_dir_leaf_remove(args->trans, bp, index);
		/* report back remaining entries and name bytes */
		*count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
		*totallen = INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
		retval = 0;
	}
	xfs_da_buf_done(bp);
	return(retval);
}
/* global_hdr_checksum_set - fill in the global media file header checksum.
 * utility function for use by drive-specific strategies.
 * Zeroes the checksum field, sums the header's 32-bit words, and stores
 * the two's complement so that the whole header then sums to zero.
 */
void
global_hdr_checksum_set(global_hdr_t *hdrp)
{
	u_int32_t	*wordp = (u_int32_t *)&hdrp[0];
	u_int32_t	*stopp = (u_int32_t *)&hdrp[1];
	u_int32_t	sum = 0;

	hdrp->gh_checksum = 0;
	while (wordp < stopp) {
		sum += INT_GET(*wordp, ARCH_CONVERT);
		wordp++;
	}
	INT_SET(hdrp->gh_checksum, ARCH_CONVERT, (int32_t)(~sum + 1));
}
/*
 * Size in bits of leaf entry idx's name structure, or 0 when the
 * block is not a directory leaf.
 */
/*ARGSUSED*/
int
dir_leaf_name_size(
	void	*obj,
	int	startoff,
	int	idx)
{
	xfs_dir_leafblock_t	*leaf = obj;

	ASSERT(startoff == 0);
	if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC)
		return 0;
	return bitize((int)XFS_DIR_LEAF_ENTSIZE_BYENTRY(&leaf->entries[idx]));
}
static int bnobt_rec_offset( void *obj, int startoff, int idx) { xfs_alloc_block_t *block; xfs_alloc_rec_t *rp; ASSERT(startoff == 0); block = obj; ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) == 0); rp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, block, idx, XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_alloc, 1)); return bitize((int)((char *)rp - (char *)block)); }
/* * Initialize the dquot log item for a newly allocated dquot. * The dquot isn't locked at this point, but it isn't on any of the lists * either, so we don't care. */ void xfs_qm_dquot_logitem_init( struct xfs_dquot *dqp) { xfs_dq_logitem_t *lp; lp = &dqp->q_logitem; lp->qli_item.li_type = XFS_LI_DQUOT; lp->qli_item.li_ops = &xfs_dquot_item_ops; lp->qli_item.li_mountp = dqp->q_mount; lp->qli_dquot = dqp; lp->qli_format.qlf_type = XFS_LI_DQUOT; lp->qli_format.qlf_id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT); lp->qli_format.qlf_blkno = dqp->q_blkno; lp->qli_format.qlf_len = 1; /* * This is just the offset of this dquot within its buffer * (which is currently 1 FSB and probably won't change). * Hence 32 bits for this offset should be just fine. * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t)) * here, and recompute it at recovery time. */ lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset; }
/*
 * Lookup a dquot in the incore dquot hashtable. We keep two separate
 * hashtables for user and group dquots; and, these are global tables
 * inside the XQM, not per-filesystem tables.
 * The hash chain must be locked by caller, and it is left locked
 * on return. Returning dquot is locked.
 *
 * Returns 0 with *O_dqpp set to the found dquot (referenced, locked,
 * removed from the freelist if it was there, and moved to the front
 * of the hash chain); returns 1 with *O_dqpp = NULL when no match.
 */
STATIC int
xfs_qm_dqlookup(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	xfs_dqhash_t		*qh,
	xfs_dquot_t		**O_dqpp)
{
	xfs_dquot_t		*dqp;
	uint			flist_locked;
	xfs_dquot_t		*d;

	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));

	flist_locked = B_FALSE;

	/*
	 * Traverse the hashchain looking for a match
	 */
	for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) {
		/*
		 * We already have the hashlock. We don't need the
		 * dqlock to look at the id field of the dquot, since the
		 * id can't be modified without the hashlock anyway.
		 */
		if (INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id &&
		    dqp->q_mount == mp) {
			xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP");
			/*
			 * All in core dquots must be on the dqlist of mp
			 */
			ASSERT(dqp->MPL_PREVP != NULL);

			xfs_dqlock(dqp);
			if (dqp->q_nrefs == 0) {
				/*
				 * Unreferenced dquots live on the freelist;
				 * we must take the freelist lock before we
				 * can pull this one off it.
				 */
				ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
				if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
					xfs_dqtrace_entry(dqp,
							"DQLOOKUP: WANT");
					/*
					 * We may have raced with dqreclaim_one()
					 * (and lost). So, flag that we don't
					 * want the dquot to be reclaimed.
					 */
					dqp->dq_flags |= XFS_DQ_WANT;
					/*
					 * Drop the dqlock to respect lock
					 * ordering (freelist before dquot),
					 * then reacquire both.
					 */
					xfs_dqunlock(dqp);
					xfs_qm_freelist_lock(xfs_Gqm);
					xfs_dqlock(dqp);
					dqp->dq_flags &= ~(XFS_DQ_WANT);
				}
				flist_locked = B_TRUE;
			}

			/*
			 * id couldn't have changed; we had the hashlock all
			 * along
			 */
			ASSERT(INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id);

			if (flist_locked) {
				if (dqp->q_nrefs != 0) {
					/*
					 * Someone referenced it while we
					 * dropped the dqlock; it is no
					 * longer on the freelist.
					 */
					xfs_qm_freelist_unlock(xfs_Gqm);
					flist_locked = B_FALSE;
				} else {
					/*
					 * take it off the freelist
					 */
					xfs_dqtrace_entry(dqp,
						"DQLOOKUP: TAKEOFF FL");
					XQM_FREELIST_REMOVE(dqp);
					/* xfs_qm_freelist_print(&(xfs_Gqm->
							qm_dqfreelist),
							"after removal"); */
				}
			}

			/*
			 * grab a reference
			 */
			XFS_DQHOLD(dqp);

			if (flist_locked)
				xfs_qm_freelist_unlock(xfs_Gqm);
			/*
			 * move the dquot to the front of the hashchain
			 */
			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
			if (dqp->HL_PREVP != &qh->qh_next) {
				xfs_dqtrace_entry(dqp,
					"DQLOOKUP: HASH MOVETOFRONT");
				if ((d = dqp->HL_NEXT))
					d->HL_PREVP = dqp->HL_PREVP;
				*(dqp->HL_PREVP) = d;
				d = qh->qh_next;
				d->HL_PREVP = &dqp->HL_NEXT;
				dqp->HL_NEXT = d;
				dqp->HL_PREVP = &qh->qh_next;
				qh->qh_next = dqp;
			}
			xfs_dqtrace_entry(dqp, "LOOKUP END");
			*O_dqpp = dqp;
			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
			return (0);
		}
	}

	*O_dqpp = NULL;
	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
	return (1);
}
/*
 * Convert a block format directory to shortform.
 * Caller has already checked that it will fit, and built us a header.
 *
 * The block is copied aside, the directory block is freed from the
 * inode, the inode is switched to local format, and every live entry
 * (other than "." and "..", which are implicit in shortform) is copied
 * into the inline data.  Returns 0 or an error from shrink_inode.
 */
int						/* error */
xfs_dir2_block_to_sf(
	xfs_da_args_t		*args,		/* operation arguments */
	xfs_dabuf_t		*bp,		/* block buffer */
	int			size,		/* shortform directory size */
	xfs_dir2_sf_hdr_t	*sfhp)		/* shortform directory hdr */
{
	xfs_dir2_block_t	*block;		/* block structure */
	xfs_dir2_block_tail_t	*btp;		/* block tail pointer */
	xfs_dir2_data_entry_t	*dep;		/* data entry pointer */
	xfs_inode_t		*dp;		/* incore directory inode */
	xfs_dir2_data_unused_t	*dup;		/* unused data pointer */
	char			*endptr;	/* end of data entries */
	int			error;		/* error return value */
	int			logflags;	/* inode logging flags */
	xfs_mount_t		*mp;		/* filesystem mount point */
	char			*ptr;		/* current data pointer */
	xfs_dir2_sf_entry_t	*sfep;		/* shortform entry */
	xfs_dir2_sf_t		*sfp;		/* shortform structure */
	xfs_ino_t		temp;

	xfs_dir2_trace_args_sb("block_to_sf", args, size, bp);
	dp = args->dp;
	mp = dp->i_mount;
	/*
	 * Make a copy of the block data, so we can shrink the inode
	 * and add local data.
	 */
	block = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
	memcpy(block, bp->data, mp->m_dirblksize);
	logflags = XFS_ILOG_CORE;
	if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) {
		ASSERT(error != ENOSPC);
		goto out;
	}
	/*
	 * The buffer is now unconditionally gone, whether
	 * xfs_dir2_shrink_inode worked or not.
	 *
	 * Convert the inode to local format.
	 */
	dp->i_df.if_flags &= ~XFS_IFEXTENTS;
	dp->i_df.if_flags |= XFS_IFINLINE;
	dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
	ASSERT(dp->i_df.if_bytes == 0);
	xfs_idata_realloc(dp, size, XFS_DATA_FORK);
	logflags |= XFS_ILOG_DDATA;
	/*
	 * Copy the header into the newly allocate local space.
	 */
	sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
	memcpy(sfp, sfhp, XFS_DIR2_SF_HDR_SIZE(sfhp->i8count));
	dp->i_d.di_size = size;
	/*
	 * Set up to loop over the block's entries.
	 */
	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
	ptr = (char *)block->u;
	endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp);
	sfep = XFS_DIR2_SF_FIRSTENTRY(sfp);
	/*
	 * Loop over the active and unused entries.
	 * Stop when we reach the leaf/tail portion of the block.
	 */
	while (ptr < endptr) {
		/*
		 * If it's unused, just skip over it.
		 */
		dup = (xfs_dir2_data_unused_t *)ptr;
		if (INT_GET(dup->freetag, ARCH_CONVERT)
						== XFS_DIR2_DATA_FREE_TAG) {
			ptr += INT_GET(dup->length, ARCH_CONVERT);
			continue;
		}
		dep = (xfs_dir2_data_entry_t *)ptr;
		/*
		 * Skip .
		 */
		if (dep->namelen == 1 && dep->name[0] == '.')
			ASSERT(INT_GET(dep->inumber, ARCH_CONVERT)
								== dp->i_ino);
		/*
		 * Skip .., but make sure the inode number is right.
		 */
		else if (dep->namelen == 2 &&
			 dep->name[0] == '.' && dep->name[1] == '.')
			ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) ==
			       XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent));
		/*
		 * Normal entry, copy it into shortform.
		 */
		else {
			sfep->namelen = dep->namelen;
			XFS_DIR2_SF_PUT_OFFSET(sfep,
				(xfs_dir2_data_aoff_t)
				((char *)dep - (char *)block));
			memcpy(sfep->name, dep->name, dep->namelen);
			temp=INT_GET(dep->inumber, ARCH_CONVERT);
			XFS_DIR2_SF_PUT_INUMBER(sfp, &temp,
				XFS_DIR2_SF_INUMBERP(sfep));
			sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep);
		}
		ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen);
	}
	/* the caller-computed size must match what we actually wrote */
	ASSERT((char *)sfep - (char *)sfp == size);
	xfs_dir2_sf_check(args);
out:
	xfs_trans_log_inode(args->trans, dp, logflags);
	kmem_free(block, mp->m_dirblksize);
	return error;
}
/*
 * Given a block directory (dp/block), calculate its size as a shortform (sf)
 * directory and a header for the sf directory, if it will fit it the
 * space currently present in the inode. If it won't fit, the output
 * size is too big (but not accurate).
 *
 * On success the returned size is <= XFS_IFORK_DSIZE(dp) and *sfhp is
 * filled in (count, i8count, parent); a larger return value signals
 * "does not fit" and *sfhp is left untouched.
 */
int						/* size for sf form */
xfs_dir2_block_sfsize(
	xfs_inode_t		*dp,		/* incore inode pointer */
	xfs_dir2_block_t	*block,		/* block directory data */
	xfs_dir2_sf_hdr_t	*sfhp)		/* output: header for sf form */
{
	xfs_dir2_dataptr_t	addr;		/* data entry address */
	xfs_dir2_leaf_entry_t	*blp;		/* leaf area of the block */
	xfs_dir2_block_tail_t	*btp;		/* tail area of the block */
	int			count;		/* shortform entry count */
	xfs_dir2_data_entry_t	*dep;		/* data entry in the block */
	int			i;		/* block entry index */
	int			i8count;	/* count of big-inode entries */
	int			isdot;		/* entry is "." */
	int			isdotdot;	/* entry is ".." */
	xfs_mount_t		*mp;		/* mount structure pointer */
	int			namelen;	/* total name bytes */
	xfs_ino_t		parent = 0;	/* parent inode number */
	int			size=0;		/* total computed size */

	mp = dp->i_mount;

	count = i8count = namelen = 0;
	btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
	blp = XFS_DIR2_BLOCK_LEAF_P(btp);

	/*
	 * Iterate over the block's data entries by using the leaf pointers.
	 */
	for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) {
		/* stale leaf entries point at nothing */
		if ((addr = INT_GET(blp[i].address, ARCH_CONVERT))
						== XFS_DIR2_NULL_DATAPTR)
			continue;
		/*
		 * Calculate the pointer to the entry at hand.
		 */
		dep = (xfs_dir2_data_entry_t *)
		      ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, addr));
		/*
		 * Detect . and .., so we can special-case them.
		 * . is not included in sf directories.
		 * .. is included by just the parent inode number.
		 */
		isdot = dep->namelen == 1 && dep->name[0] == '.';
		isdotdot =
			dep->namelen == 2 &&
			dep->name[0] == '.' && dep->name[1] == '.';
#if XFS_BIG_INUMS
		if (!isdot)
			i8count += INT_GET(dep->inumber, ARCH_CONVERT) >
				   XFS_DIR2_MAX_SHORT_INUM;
#endif
		if (!isdot && !isdotdot) {
			count++;
			namelen += dep->namelen;
		} else if (isdotdot)
			parent = INT_GET(dep->inumber, ARCH_CONVERT);
		/*
		 * Calculate the new size, see if we should give up yet.
		 */
		/* Add by Jerry 2007/10/25
		   Fixed bug that XFS file system corrupted under Winthrax
		   test.  This is the "special" structure alignment on the
		   ARM.  Port a workaround solution from a patch on
		   http://lists.arm.linux.org.uk/pipermail/linux-arm-kernel/2004-March/020287.html
		   and
		   http://www.nas-central.org/index.php/Buffalo_ARM9_Kernel_Port#XFS_Arm_Issues
		   This patch was never accepted by XFS because the code is
		   incorrect for all other platforms. */
#if 0
		size = XFS_DIR2_SF_HDR_SIZE(i8count) +	/* header */
		       count +				/* namelen */
		       count * (uint)sizeof(xfs_dir2_sf_off_t) + /* offset */
		       namelen +			/* name */
		       (i8count ?			/* inumber */
				(uint)sizeof(xfs_dir2_ino8_t) * count :
				(uint)sizeof(xfs_dir2_ino4_t) * count);
#else
		size = XFS_DIR2_SF_HDR_SIZE(i8count)	/* header */
		       + namelen
		       + (count * (sizeof(xfs_dir2_sf_entry_t) - 1))
		       - (count * (((i8count == 0) ? 1 : 0) *
				(sizeof(xfs_dir2_ino8_t) -
				 sizeof(xfs_dir2_ino4_t))));
#endif
		if (size > XFS_IFORK_DSIZE(dp))
			return size;		/* size value is a failure */
	}
	/*
	 * Create the output header, if it worked.
	 */
	sfhp->count = count;
	sfhp->i8count = i8count;
	XFS_DIR2_SF_PUT_INUMBER((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent);
	return size;
}
/*
 * Adjust quota limits, and start/stop timers accordingly.
 *
 * Applies the fields selected by newlim->d_fieldmask to the dquot for
 * (id, type), allocating it if necessary, inside its own transaction
 * and under the quotaoff lock.  The id-0 dquot carries the
 * filesystem-wide default limits and timelimits.  Returns 0 on
 * success, EPERM without CAP_SYS_ADMIN, or a transaction/dqget error.
 */
STATIC int
xfs_qm_scall_setqlim(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	xfs_disk_dquot_t	*ddq;
	xfs_dquot_t		*dqp;
	xfs_trans_t		*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);

	/* nothing requested: nothing to do */
	if ((newlim->d_fieldmask & (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK)) == 0)
		return (0);

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
				       0, 0, XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * (We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening). (XXXThis doesn't currently happen
	 * because we take the vfslock before calling xfs_qm_sysent).
	 */
	mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);

	/*
	 * Get the dquot (locked), and join it to the transaction.
	 * Allocate the dquot if this doesn't exist.
	 */
	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC,
				  &dqp))) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
		ASSERT(error != ENOENT);
		return (error);
	}
	xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET");
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 * Fields not selected in the mask keep their current values.
	 */
	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			INT_GET(ddq->d_blk_hardlimit, ARCH_CONVERT);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT);
	if (hard == 0 || hard >= soft) {
		INT_SET(ddq->d_blk_hardlimit, ARCH_CONVERT, hard);
		INT_SET(ddq->d_blk_softlimit, ARCH_CONVERT, soft);
		/* id 0 carries the filesystem-wide defaults */
		if (id == 0) {
			mp->m_quotainfo->qi_bhardlimit = hard;
			mp->m_quotainfo->qi_bsoftlimit = soft;
		}
	} else {
		qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			INT_GET(ddq->d_rtb_hardlimit, ARCH_CONVERT);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT);
	if (hard == 0 || hard >= soft) {
		INT_SET(ddq->d_rtb_hardlimit, ARCH_CONVERT, hard);
		INT_SET(ddq->d_rtb_softlimit, ARCH_CONVERT, soft);
		if (id == 0) {
			mp->m_quotainfo->qi_rtbhardlimit = hard;
			mp->m_quotainfo->qi_rtbsoftlimit = soft;
		}
	} else {
		qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			INT_GET(ddq->d_ino_hardlimit, ARCH_CONVERT);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT);
	if (hard == 0 || hard >= soft) {
		INT_SET(ddq->d_ino_hardlimit, ARCH_CONVERT, hard);
		INT_SET(ddq->d_ino_softlimit, ARCH_CONVERT, soft);
		if (id == 0) {
			mp->m_quotainfo->qi_ihardlimit = hard;
			mp->m_quotainfo->qi_isoftlimit = soft;
		}
	} else {
		qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
	}

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used. Ditto for the default
		 * soft and hard limit values (already done, above).
		 */
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			mp->m_quotainfo->qi_btimelimit = newlim->d_btimer;
			INT_SET(ddq->d_btimer, ARCH_CONVERT,
				newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			mp->m_quotainfo->qi_itimelimit = newlim->d_itimer;
			INT_SET(ddq->d_itimer, ARCH_CONVERT,
				newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer;
			INT_SET(ddq->d_rtbtimer, ARCH_CONVERT,
				newlim->d_rtbtimer);
		}
	} else /* if (XFS_IS_QUOTA_ENFORCED(mp)) */ {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);
	xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT");
	xfs_trans_commit(tp, 0, NULL);
	xfs_qm_dqprint(dqp);
	xfs_qm_dqrele(dqp);
	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));

	return (0);
}
/*
 * getdents for a node-format (btree) directory: re-find the position
 * encoded in the uio cookie (cached block number + hashval), then walk
 * leaf blocks forward, copying entries into the user buffer via 'put'
 * until the buffer fills or the directory ends.  *eofp is set to 1 at
 * end of directory, 0 when the buffer filled first.
 */
STATIC int
xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
				  int *eofp, dirent_t *dbp, xfs_dir_put_t put)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dir_leafblock_t *leaf;
	xfs_dablk_t bno, nextbno;
	xfs_dahash_t cookhash;
	xfs_mount_t *mp;
	int error, eob, i;
	xfs_dabuf_t *bp;
	xfs_daddr_t nextda;

	/*
	 * Pick up our context.
	 */
	mp = dp->i_mount;
	bp = NULL;
	bno = XFS_DA_COOKIE_BNO(mp, uio->uio_offset);
	cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset);

	xfs_dir_trace_g_du("node: start", dp, uio);

	/*
	 * Re-find our place, even if we're confused about what our place is.
	 *
	 * First we check the block number from the magic cookie, it is a
	 * cache of where we ended last time. If we find a leaf block, and
	 * the starting hashval in that block is less than our desired
	 * hashval, then we run with it.
	 */
	if (bno > 0) {
		error = xfs_da_read_buf(trans, dp, bno, -1, &bp,
					XFS_DATA_FORK);
		if ((error != 0) && (error != EFSCORRUPTED))
			return(error);
		if (bp)
			leaf = bp->data;
		/* reject the cached block if it isn't a leaf ... */
		if (bp && INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
						!= XFS_DIR_LEAF_MAGIC) {
			xfs_dir_trace_g_dub("node: block not a leaf",
						   dp, uio, bno);
			xfs_da_brelse(trans, bp);
			bp = NULL;
		}
		/* ... or if our hashval falls outside its range */
		if (bp && INT_GET(leaf->entries[0].hashval, ARCH_CONVERT)
							> cookhash) {
			xfs_dir_trace_g_dub("node: leaf hash too large",
						   dp, uio, bno);
			xfs_da_brelse(trans, bp);
			bp = NULL;
		}
		if (bp &&
		    cookhash > INT_GET(leaf->entries[
				INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1
				].hashval, ARCH_CONVERT)) {
			xfs_dir_trace_g_dub("node: leaf hash too small",
						   dp, uio, bno);
			xfs_da_brelse(trans, bp);
			bp = NULL;
		}
	}

	/*
	 * If we did not find a leaf block from the blockno in the cookie,
	 * or we there was no blockno in the cookie (eg: first time thru),
	 * the we start at the top of the Btree and re-find our hashval.
	 */
	if (bp == NULL) {
		xfs_dir_trace_g_du("node: start at root" , dp, uio);
		bno = 0;
		for (;;) {
			error = xfs_da_read_buf(trans, dp, bno, -1, &bp,
						       XFS_DATA_FORK);
			if (error)
				return(error);
			if (bp == NULL)
				return(XFS_ERROR(EFSCORRUPTED));
			node = bp->data;
			/* a non-node block means we've reached the leaf */
			if (INT_GET(node->hdr.info.magic, ARCH_CONVERT)
						!= XFS_DA_NODE_MAGIC)
				break;
			btree = &node->btree[0];
			xfs_dir_trace_g_dun("node: node detail", dp, uio,
					    node);
			for (i = 0;
			     i < INT_GET(node->hdr.count, ARCH_CONVERT);
			     btree++, i++) {
				if (INT_GET(btree->hashval, ARCH_CONVERT)
							>= cookhash) {
					bno = INT_GET(btree->before,
						      ARCH_CONVERT);
					break;
				}
			}
			if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) {
				/* hashval beyond the last entry: EOF */
				xfs_da_brelse(trans, bp);
				xfs_dir_trace_g_du("node: hash beyond EOF",
							  dp, uio);
				uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0,
							       XFS_DA_MAXHASH);
				*eofp = 1;
				return(0);
			}
			xfs_dir_trace_g_dub("node: going to block",
						   dp, uio, bno);
			xfs_da_brelse(trans, bp);
		}
	}
	ASSERT(cookhash != XFS_DA_MAXHASH);

	/*
	 * We've dropped down to the (first) leaf block that contains the
	 * hashval we are interested in. Continue rolling upward thru the
	 * leaf blocks until we fill up our buffer.
	 */
	for (;;) {
		leaf = bp->data;
		if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
						!= XFS_DIR_LEAF_MAGIC) {
			xfs_dir_trace_g_dul("node: not a leaf", dp, uio,
					    leaf);
			xfs_da_brelse(trans, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf);
		/*
		 * NOTE: intentional assignment in the condition below;
		 * read-ahead the next leaf while we process this one.
		 */
		if (nextbno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT)) {
			nextda = xfs_da_reada_buf(trans, dp, nextbno,
						  XFS_DATA_FORK);
		} else
			nextda = -1;
		error = xfs_dir_leaf_getdents_int(bp, dp, bno, uio, &eob, dbp,
						  put, nextda);
		xfs_da_brelse(trans, bp);
		bno = nextbno;
		if (eob) {
			/* user buffer filled before the directory ended */
			xfs_dir_trace_g_dub("node: E-O-B", dp, uio, bno);
			*eofp = 0;
			return(error);
		}
		if (bno == 0)
			break;
		error = xfs_da_read_buf(trans, dp, bno, nextda, &bp,
					       XFS_DATA_FORK);
		if (error)
			return(error);
		if (bp == NULL)
			return(XFS_ERROR(EFSCORRUPTED));
	}
	*eofp = 1;
	xfs_dir_trace_g_du("node: E-O-F", dp, uio);
	return(0);
}
/*
 * Translate an internal style on-disk-dquot to the exportable format.
 * The main differences are that the counters/limits are all in Basic
 * Blocks (BBs) instead of the internal FSBs, and all on-disk data has
 * to be converted to the native endianness.
 */
STATIC void
xfs_qm_export_dquot(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*src,
	struct fs_disk_quota	*dst)
{
	memset(dst, 0, sizeof(*dst));
	dst->d_version = FS_DQUOT_VERSION;  /* different from src->d_version */
	dst->d_flags =
		xfs_qm_export_qtype_flags(INT_GET(src->d_flags,
						  ARCH_CONVERT));
	dst->d_id = INT_GET(src->d_id, ARCH_CONVERT);
	/* block counters and limits convert FSB -> BB */
	dst->d_blk_hardlimit = (__uint64_t)
		XFS_FSB_TO_BB(mp, INT_GET(src->d_blk_hardlimit,
					  ARCH_CONVERT));
	dst->d_blk_softlimit = (__uint64_t)
		XFS_FSB_TO_BB(mp, INT_GET(src->d_blk_softlimit,
					  ARCH_CONVERT));
	dst->d_ino_hardlimit = (__uint64_t)
		INT_GET(src->d_ino_hardlimit, ARCH_CONVERT);
	dst->d_ino_softlimit = (__uint64_t)
		INT_GET(src->d_ino_softlimit, ARCH_CONVERT);
	dst->d_bcount = (__uint64_t)
		XFS_FSB_TO_BB(mp, INT_GET(src->d_bcount, ARCH_CONVERT));
	dst->d_icount = (__uint64_t) INT_GET(src->d_icount, ARCH_CONVERT);
	dst->d_btimer = (__uint32_t) INT_GET(src->d_btimer, ARCH_CONVERT);
	dst->d_itimer = (__uint32_t) INT_GET(src->d_itimer, ARCH_CONVERT);
	dst->d_iwarns = INT_GET(src->d_iwarns, ARCH_CONVERT);
	dst->d_bwarns = INT_GET(src->d_bwarns, ARCH_CONVERT);
	dst->d_rtb_hardlimit = (__uint64_t)
		XFS_FSB_TO_BB(mp, INT_GET(src->d_rtb_hardlimit,
					  ARCH_CONVERT));
	dst->d_rtb_softlimit = (__uint64_t)
		XFS_FSB_TO_BB(mp, INT_GET(src->d_rtb_softlimit,
					  ARCH_CONVERT));
	dst->d_rtbcount = (__uint64_t)
		XFS_FSB_TO_BB(mp, INT_GET(src->d_rtbcount, ARCH_CONVERT));
	dst->d_rtbtimer = (__uint32_t) INT_GET(src->d_rtbtimer,
					       ARCH_CONVERT);
	dst->d_rtbwarns = INT_GET(src->d_rtbwarns, ARCH_CONVERT);

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the userlevel code,
	 * so return zeroes in that case.
	 */
	if (! XFS_IS_QUOTA_ENFORCED(mp)) {
		dst->d_btimer = 0;
		dst->d_itimer = 0;
		dst->d_rtbtimer = 0;
	}

#ifdef DEBUG
	if (XFS_IS_QUOTA_ENFORCED(mp) && dst->d_id != 0) {
		/* anyone over a soft limit must have a running timer */
		if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) &&
		    (dst->d_blk_softlimit > 0)) {
			ASSERT(dst->d_btimer != 0);
		}
		if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_itimer != 0);
		}
	}
#endif
}
/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	xfs_trans_t		*tp)
{
	int			i, j;
	xfs_dquot_t		*dqp;
	xfs_dqtrx_t		*qtrx, *qa;
	xfs_disk_dquot_t	*d;
	long			totalbdelta;
	long			totalrtbdelta;

	ASSERT(tp->t_dqinfo);
	/* first pass walks the user dquots, second pass the group dquots */
	qa = tp->t_dqinfo->dqa_usrdquots;
	for (j = 0; j < 2; j++) {
		if (qa[0].qt_dquot == NULL) {
			qa = tp->t_dqinfo->dqa_grpdquots;
			continue;
		}
		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			ASSERT(XFS_DQ_IS_LOCKED(dqp));
			ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
			/*
			 * adjust the actual number of blocks used
			 */
			d = &dqp->q_core;
			/*
			 * The issue here is - sometimes we don't make a
			 * blkquota reservation intentionally to be fair to
			 * users (when the amount is small). On the other
			 * hand, delayed allocs do make reservations, but
			 * that's outside of a transaction, so we have no
			 * idea how much was really reserved.
			 * So, here we've accumulated delayed allocation blks
			 * and non-delay blks. The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;
#ifdef QUOTADEBUG
			if (totalbdelta < 0)
				ASSERT(INT_GET(d->d_bcount, ARCH_CONVERT) >=
				       (xfs_qcnt_t) -totalbdelta);
			if (totalrtbdelta < 0)
				ASSERT(INT_GET(d->d_rtbcount, ARCH_CONVERT) >=
				       (xfs_qcnt_t) -totalrtbdelta);
			if (qtrx->qt_icount_delta < 0)
				ASSERT(INT_GET(d->d_icount, ARCH_CONVERT) >=
				       (xfs_qcnt_t) -qtrx->qt_icount_delta);
#endif
			if (totalbdelta)
				INT_MOD(d->d_bcount, ARCH_CONVERT,
					(xfs_qcnt_t)totalbdelta);
			if (qtrx->qt_icount_delta)
				INT_MOD(d->d_icount, ARCH_CONVERT,
					(xfs_qcnt_t)qtrx->qt_icount_delta);
			if (totalrtbdelta)
				INT_MOD(d->d_rtbcount, ARCH_CONVERT,
					(xfs_qcnt_t)totalrtbdelta);
			/*
			 * Start/reset the timer(s) if needed.
			 */
			xfs_qm_adjust_dqtimers(tp->t_mountp, d);
			dqp->dq_flags |= XFS_DQ_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			if (qtrx->qt_blk_res != 0) {
				if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
					if (qtrx->qt_blk_res >
					    qtrx->qt_blk_res_used)
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res -
							 qtrx->qt_blk_res_used);
					else
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res_used -
							 qtrx->qt_blk_res);
				}
			} else {
				/*
				 * These blks were never reserved, either inside
				 * a transaction or outside one (in a delayed
				 * allocation). Also, this isn't always a
				 * negative number since we sometimes
				 * deliberately skip quota reservations.
				 */
				if (qtrx->qt_bcount_delta) {
					dqp->q_res_bcount +=
					      (xfs_qcnt_t)qtrx->qt_bcount_delta;
				}
			}
			/*
			 * Adjust the RT reservation.
			 * BUG FIX: the inner comparison previously tested the
			 * regular-block fields (qt_blk_res/qt_blk_res_used)
			 * instead of the RT ones, so RT reservations were
			 * unreserved based on the wrong counters.
			 */
			if (qtrx->qt_rtblk_res != 0) {
				if (qtrx->qt_rtblk_res !=
				    qtrx->qt_rtblk_res_used) {
					if (qtrx->qt_rtblk_res >
					    qtrx->qt_rtblk_res_used)
						dqp->q_res_rtbcount -=
						      (xfs_qcnt_t)
						      (qtrx->qt_rtblk_res -
						       qtrx->qt_rtblk_res_used);
					else
						dqp->q_res_rtbcount -=
						      (xfs_qcnt_t)
						      (qtrx->qt_rtblk_res_used -
						       qtrx->qt_rtblk_res);
				}
			} else {
				if (qtrx->qt_rtbcount_delta)
					dqp->q_res_rtbcount +=
					    (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
			}
			/*
			 * Adjust the inode reservation.
			 */
			if (qtrx->qt_ino_res != 0) {
				ASSERT(qtrx->qt_ino_res >=
				       qtrx->qt_ino_res_used);
				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
					dqp->q_res_icount -= (xfs_qcnt_t)
						(qtrx->qt_ino_res -
						 qtrx->qt_ino_res_used);
			} else {
				if (qtrx->qt_icount_delta)
					dqp->q_res_icount +=
					    (xfs_qcnt_t)qtrx->qt_icount_delta;
			}
#ifdef QUOTADEBUG
			if (qtrx->qt_rtblk_res != 0)
				printk("RT res %d for 0x%p\n",
					(int) qtrx->qt_rtblk_res, dqp);
#endif
			/* reserved counts must never drop below actual usage */
			ASSERT(dqp->q_res_bcount >=
				INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
			ASSERT(dqp->q_res_icount >=
				INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));
			ASSERT(dqp->q_res_rtbcount >=
				INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
		}
		/*
		 * Do the group quotas next
		 */
		qa = tp->t_dqinfo->dqa_grpdquots;
	}
}
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 * Returns EDQUOT if quota is exceeded.
 *
 * On success the reservation is recorded in the incore dquot
 * (q_res_bcount/q_res_rtbcount/q_res_icount) and, when a transaction
 * is supplied, also noted in the transaction's dquot deltas so it can
 * be unreserved at commit/cancel time.  The dquot is locked for the
 * duration of the check+reserve; it is unlocked on exit unless the
 * caller passed XFS_QMOPT_DQLOCK (meaning the caller holds the lock).
 */
STATIC int
xfs_trans_dqresv(
	xfs_trans_t	*tp,	/* may be NULL for delayed-alloc reservations */
	xfs_dquot_t	*dqp,	/* dquot to reserve against */
	long		nblks,	/* block reservation (regular or RT per flags) */
	long		ninos,	/* inode reservation */
	uint		flags)
{
	int		error;
	xfs_qcnt_t	hardlimit;	/* selected blk/rtblk hard limit */
	xfs_qcnt_t	softlimit;	/* selected blk/rtblk soft limit */
	time_t		btimer;		/* selected blk/rtblk grace timer */
	xfs_qcnt_t	*resbcountp;	/* incore reserved-count to adjust */

	/* caller may already hold the dquot lock (XFS_QMOPT_DQLOCK) */
	if (! (flags & XFS_QMOPT_DQLOCK)) {
		xfs_dqlock(dqp);
	}
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Pick the limit/timer/reservation-counter set for either regular
	 * blocks or realtime blocks, depending on the reservation flags.
	 */
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT);
		softlimit = INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT);
		btimer = INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT);
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = INT_GET(dqp->q_core.d_rtb_hardlimit, ARCH_CONVERT);
		softlimit = INT_GET(dqp->q_core.d_rtb_softlimit, ARCH_CONVERT);
		btimer = INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT);
		resbcountp = &dqp->q_res_rtbcount;
	}
	error = 0;
	/*
	 * Enforce limits only when not forced, the dquot is not the
	 * special id-0 (defaults) dquot, and enforcement is on.
	 */
	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    !INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT) &&
	    XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) {
#ifdef QUOTADEBUG
		printk("BLK Res: nblks=%ld + resbcount=%Ld > hardlimit=%Ld?\n",
			nblks, *resbcountp, hardlimit);
#endif
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			if (hardlimit > 0ULL &&
			    (hardlimit <= nblks + *resbcountp)) {
				error = EDQUOT;
				goto error_return;
			}
			if (softlimit > 0ULL &&
			    (softlimit <= nblks + *resbcountp)) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((btimer != 0 && CURRENT_TIME > btimer) ||
				    (!INT_ISZERO(dqp->q_core.d_bwarns,
						 ARCH_CONVERT) &&
				     INT_GET(dqp->q_core.d_bwarns,
					     ARCH_CONVERT) >=
				     XFS_QI_BWARNLIMIT(dqp->q_mount))) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
		if (ninos > 0) {
			/*
			 * Inode limits: hard limit is a hard stop; soft
			 * limit only fails once the grace timer or warning
			 * count has been exhausted.
			 */
			if (INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT) > 0ULL &&
			    INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >=
			    INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT)) {
				error = EDQUOT;
				goto error_return;
			} else if (INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT) > 0ULL &&
				   INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >=
				   INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT)) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((!INT_ISZERO(dqp->q_core.d_itimer, ARCH_CONVERT) &&
				     CURRENT_TIME > INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT)) ||
				    (!INT_ISZERO(dqp->q_core.d_iwarns, ARCH_CONVERT) &&
				     INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT) >=
				     XFS_QI_IWARNLIMIT(dqp->q_mount))) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 * (reservation counters include current usage; see xfs_qm_dqread).
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	/* reservation counters must never drop below on-disk usage */
	ASSERT(dqp->q_res_bcount >= INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
	ASSERT(dqp->q_res_rtbcount >= INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
	ASSERT(dqp->q_res_icount >= INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));

error_return:
	/* unlock only if we took the lock above */
	if (! (flags & XFS_QMOPT_DQLOCK)) {
		xfs_dqunlock(dqp);
	}
	return (error);
}