/*
 * Set the extended attribute holding the ACL, converting the in-core
 * (host-endian) ACL to on-disk byte order first.
 *
 * vp    - vnode whose EA is being set
 * aclp  - in-core ACL to store
 * kind  - _ACL_TYPE_ACCESS or default; selects the EA name used
 * error - out: 0 on success, errno on failure (ENOMEM if the scratch
 *         buffer cannot be allocated; otherwise whatever XVOP_ATTR_SET
 *         reports)
 */
STATIC void
xfs_acl_set_attr(
	xfs_vnode_t	*vp,
	xfs_acl_t	*aclp,
	int		kind,
	int		*error)
{
	xfs_acl_t	*diskacl;	/* scratch copy in disk byte order */
	int		i;
	int		attrlen;	/* bytes of EA actually occupied */

	if (!(_ACL_ALLOC(diskacl))) {
		*error = ENOMEM;
		return;
	}

	/* Convert each in-use entry into the scratch buffer. */
	for (i = 0; i < aclp->acl_cnt; i++) {
		INT_SET(diskacl->acl_entry[i].ae_tag, ARCH_CONVERT,
			aclp->acl_entry[i].ae_tag);
		INT_SET(diskacl->acl_entry[i].ae_id, ARCH_CONVERT,
			aclp->acl_entry[i].ae_id);
		INT_SET(diskacl->acl_entry[i].ae_perm, ARCH_CONVERT,
			aclp->acl_entry[i].ae_perm);
	}
	INT_SET(diskacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);

	/* Only write out the entries actually in use, not the full array. */
	attrlen = sizeof(xfs_acl_t) -
		(sizeof(xfs_acl_entry_t) * (XFS_ACL_MAX_ENTRIES - aclp->acl_cnt));

	XVOP_ATTR_SET(vp,
		kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE : SGI_ACL_DEFAULT,
		(char *)diskacl, attrlen, ATTR_ROOT, sys_cred, *error);
	_ACL_FREE(diskacl);
}
/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off).
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.
 *
 * All on-disk dquot fields are accessed through the INT_* / ARCH_CONVERT
 * endian macros since 'd' is in disk byte order.
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	/*
	 * The dquot had better be locked. We are modifying it here.
	 */

	/*
	 * root's limits are not real limits.
	 */
	if (INT_ISZERO(d->d_id, ARCH_CONVERT))
		return;

#ifdef QUOTADEBUG
	/* Sanity: a nonzero hard limit must be >= the soft limit. */
	if (INT_GET(d->d_blk_hardlimit, ARCH_CONVERT))
		ASSERT(INT_GET(d->d_blk_softlimit, ARCH_CONVERT) <=
			INT_GET(d->d_blk_hardlimit, ARCH_CONVERT));
	if (INT_GET(d->d_ino_hardlimit, ARCH_CONVERT))
		ASSERT(INT_GET(d->d_ino_softlimit, ARCH_CONVERT) <=
			INT_GET(d->d_ino_hardlimit, ARCH_CONVERT));
#endif

	/*
	 * Block-count timer: start it when either (nonzero) limit is
	 * reached or exceeded; clear it once usage drops below both
	 * limits again (a zero limit counts as "not exceeded").
	 */
	if (INT_ISZERO(d->d_btimer, ARCH_CONVERT)) {
		if ((INT_GET(d->d_blk_softlimit, ARCH_CONVERT) &&
		     (INT_GET(d->d_bcount, ARCH_CONVERT) >=
		      INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) ||
		    (INT_GET(d->d_blk_hardlimit, ARCH_CONVERT) &&
		     (INT_GET(d->d_bcount, ARCH_CONVERT) >=
		      INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)))) {
			/* Timer expires XFS_QI_BTIMELIMIT seconds from now. */
			INT_SET(d->d_btimer, ARCH_CONVERT,
				CURRENT_TIME + XFS_QI_BTIMELIMIT(mp));
		}
	} else {
		if ((INT_ISZERO(d->d_blk_softlimit, ARCH_CONVERT) ||
		     (INT_GET(d->d_bcount, ARCH_CONVERT) <
		      INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) &&
		    (INT_ISZERO(d->d_blk_hardlimit, ARCH_CONVERT) ||
		     (INT_GET(d->d_bcount, ARCH_CONVERT) <
		      INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)))) {
			INT_ZERO(d->d_btimer, ARCH_CONVERT);
		}
	}

	/*
	 * Inode-count timer: same start/clear policy as the block timer,
	 * applied to d_icount against the inode limits.
	 */
	if (INT_ISZERO(d->d_itimer, ARCH_CONVERT)) {
		if ((INT_GET(d->d_ino_softlimit, ARCH_CONVERT) &&
		     (INT_GET(d->d_icount, ARCH_CONVERT) >=
		      INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) ||
		    (INT_GET(d->d_ino_hardlimit, ARCH_CONVERT) &&
		     (INT_GET(d->d_icount, ARCH_CONVERT) >=
		      INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)))) {
			INT_SET(d->d_itimer, ARCH_CONVERT,
				CURRENT_TIME + XFS_QI_ITIMELIMIT(mp));
		}
	} else {
		if ((INT_ISZERO(d->d_ino_softlimit, ARCH_CONVERT) ||
		     (INT_GET(d->d_icount, ARCH_CONVERT) <
		      INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) &&
		    (INT_ISZERO(d->d_ino_hardlimit, ARCH_CONVERT) ||
		     (INT_GET(d->d_icount, ARCH_CONVERT) <
		      INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)))) {
			INT_ZERO(d->d_itimer, ARCH_CONVERT);
		}
	}
}
/*
 * Convert an ACL between disk and host byte order, in place.
 *
 * The entry count is converted first so that it can be used to bound
 * the walk over the entry array; each entry's tag, id, and permission
 * fields are then byte-swapped individually.
 */
STATIC void
xfs_acl_get_endian(
	xfs_acl_t	*aclp)
{
	int		i;

	INT_SET(aclp->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
	for (i = 0; i < aclp->acl_cnt; i++) {
		INT_SET(aclp->acl_entry[i].ae_tag, ARCH_CONVERT,
			aclp->acl_entry[i].ae_tag);
		INT_SET(aclp->acl_entry[i].ae_id, ARCH_CONVERT,
			aclp->acl_entry[i].ae_id);
		INT_SET(aclp->acl_entry[i].ae_perm, ARCH_CONVERT,
			aclp->acl_entry[i].ae_perm);
	}
}
/*
 * This is what a 'fresh' dquot inside a dquot chunk looks like on disk.
 *
 * Stamps the magic number, version, id and type flags into the embedded
 * disk dquot; everything else is left as the caller's zero fill.
 */
STATIC void
xfs_qm_dqinit_core(
	xfs_dqid_t	id,
	uint		type,
	xfs_dqblk_t	*d)
{
	xfs_disk_dquot_t	*ddq = &d->dd_diskdq;

	/*
	 * Caller has zero'd the entire dquot 'chunk' already.
	 */
	INT_SET(ddq->d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC);
	INT_SET(ddq->d_version, ARCH_CONVERT, XFS_DQUOT_VERSION);
	INT_SET(ddq->d_id, ARCH_CONVERT, id);
	INT_SET(ddq->d_flags, ARCH_CONVERT, type);
}
/*
 * Allocate and initialize a dquot. We don't always allocate fresh memory;
 * we try to reclaim a free dquot if the number of incore dquots are above
 * a threshold.
 * The only field inside the core that gets initialized at this point
 * is the d_id field. The idea is to fill in the entire q_core
 * when we read in the on disk dquot.
 *
 * mp   - mount point this dquot belongs to
 * id   - quota id stored (disk-endian) into q_core.d_id
 * type - dquot type flags, stored into dq_flags
 *
 * Returns the (possibly recycled) dquot.
 */
xfs_dquot_t *
xfs_qm_dqinit(
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_dquot_t	*dqp;
	/* true if xfs_qm_dqalloc_incore handed us fresh memory rather
	 * than a reclaimed dquot */
	boolean_t	brandnewdquot;

	brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
	dqp->dq_flags = type;
	INT_SET(dqp->q_core.d_id, ARCH_CONVERT, id);
	dqp->q_mount = mp;

	/*
	 * No need to re-initialize these if this is a reclaimed dquot.
	 */
	if (brandnewdquot) {
		/* Freelist links start out pointing at ourselves. */
		dqp->dq_flnext = dqp->dq_flprev = dqp;
		mutex_init(&dqp->q_qlock, MUTEX_DEFAULT, "xdq");
		initnsema(&dqp->q_flock, 1, "fdq");
		sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");

#ifdef XFS_DQUOT_TRACE
		dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP);
		xfs_dqtrace_entry(dqp, "DQINIT");
#endif
	} else {
		/*
		 * Only the q_core portion was zeroed in dqreclaim_one().
		 * So, we need to reset others.
		 */
		dqp->q_nrefs = 0;
		dqp->q_blkno = 0;
		/* Detach from mount-point and hash lists. */
		dqp->MPL_NEXT = dqp->HL_NEXT = NULL;
		dqp->HL_PREVP = dqp->MPL_PREVP = NULL;
		dqp->q_bufoffset = 0;
		dqp->q_fileoffset = 0;
		dqp->q_transp = NULL;
		dqp->q_gdquot = NULL;
		dqp->q_res_bcount = 0;
		dqp->q_res_icount = 0;
		dqp->q_res_rtbcount = 0;
		dqp->q_pincount = 0;
		dqp->q_hash = NULL;
		/* A reclaimed dquot must not still be on the freelist. */
		ASSERT(dqp->dq_flnext == dqp->dq_flprev);

#ifdef XFS_DQUOT_TRACE
		ASSERT(dqp->q_trace);
		xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
#endif
	}

	/*
	 * log item gets initialized later
	 */
	return (dqp);
}
/* global_hdr_checksum_set - fill in the global media file header checksum.
 * utility function for use by drive-specific strategies.
 *
 * The checksum field is zeroed first, then every 32-bit word of the
 * header (read in disk byte order via INT_GET) is summed; the stored
 * checksum is the two's-complement negation of that sum, so that a
 * verifier summing the whole header obtains zero.
 */
void
global_hdr_checksum_set( global_hdr_t *hdrp )
{
	u_int32_t *wordp = ( u_int32_t * )&hdrp[ 0 ];
	u_int32_t *stopp = ( u_int32_t * )&hdrp[ 1 ];
	u_int32_t sum = 0;

	hdrp->gh_checksum = 0;
	while ( wordp < stopp ) {
		sum += INT_GET( *wordp, ARCH_CONVERT );
		wordp++;
	}
	INT_SET( hdrp->gh_checksum, ARCH_CONVERT, ( int32_t )( ~sum + 1 ));
}
/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 *
 * The defaults live in the mount's quotainfo; each on-disk limit field
 * is written (disk-endian) only when it is currently zero and a nonzero
 * default exists for it.
 */
void
xfs_qm_adjust_dqlimits(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	xfs_quotainfo_t		*qinf = mp->m_quotainfo;

	/* The root dquot (id 0) holds the defaults; never adjust it here. */
	ASSERT(!INT_ISZERO(d->d_id, ARCH_CONVERT));

	/* Block limits. */
	if (INT_ISZERO(d->d_blk_softlimit, ARCH_CONVERT) && qinf->qi_bsoftlimit)
		INT_SET(d->d_blk_softlimit, ARCH_CONVERT, qinf->qi_bsoftlimit);
	if (INT_ISZERO(d->d_blk_hardlimit, ARCH_CONVERT) && qinf->qi_bhardlimit)
		INT_SET(d->d_blk_hardlimit, ARCH_CONVERT, qinf->qi_bhardlimit);

	/* Inode limits. */
	if (INT_ISZERO(d->d_ino_softlimit, ARCH_CONVERT) && qinf->qi_isoftlimit)
		INT_SET(d->d_ino_softlimit, ARCH_CONVERT, qinf->qi_isoftlimit);
	if (INT_ISZERO(d->d_ino_hardlimit, ARCH_CONVERT) && qinf->qi_ihardlimit)
		INT_SET(d->d_ino_hardlimit, ARCH_CONVERT, qinf->qi_ihardlimit);

	/* Realtime block limits. */
	if (INT_ISZERO(d->d_rtb_softlimit, ARCH_CONVERT) && qinf->qi_rtbsoftlimit)
		INT_SET(d->d_rtb_softlimit, ARCH_CONVERT, qinf->qi_rtbsoftlimit);
	if (INT_ISZERO(d->d_rtb_hardlimit, ARCH_CONVERT) && qinf->qi_rtbhardlimit)
		INT_SET(d->d_rtb_hardlimit, ARCH_CONVERT, qinf->qi_rtbhardlimit);
}
/*
 * Adjust quota limits, and start/stop timers accordingly.
 *
 * mp     - mount point
 * id     - quota id whose limits are being set (id 0 sets the defaults)
 * type   - dquot type passed through to xfs_qm_dqget
 * newlim - new limits/timers; d_fieldmask says which fields are valid
 *
 * Returns 0 on success or an XFS error code. Runs inside its own
 * transaction and under the quotaoff lock.
 */
STATIC int
xfs_qm_scall_setqlim(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	xfs_disk_dquot_t	*ddq;
	xfs_dquot_t		*dqp;
	xfs_trans_t		*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);

	/* Nothing requested in the fieldmask: nothing to do. */
	if ((newlim->d_fieldmask & (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK)) == 0)
		return (0);

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * (We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening). (XXXThis doesn't currently happen
	 * because we take the vfslock before calling xfs_qm_sysent).
	 */
	mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);

	/*
	 * Get the dquot (locked), and join it to the transaction.
	 * Allocate the dquot if this doesn't exist.
	 */
	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
		/* With DQALLOC set, "not found" should be impossible. */
		ASSERT(error != ENOENT);
		return (error);
	}
	xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET");
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 *
	 * Block limits arrive in basic blocks and are converted to
	 * filesystem blocks; fields not present in the mask keep their
	 * current on-disk values.
	 */
	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			INT_GET(ddq->d_blk_hardlimit, ARCH_CONVERT);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT);
	if (hard == 0 || hard >= soft) {
		INT_SET(ddq->d_blk_hardlimit, ARCH_CONVERT, hard);
		INT_SET(ddq->d_blk_softlimit, ARCH_CONVERT, soft);
		if (id == 0) {
			/* id 0 carries the filesystem-wide defaults. */
			mp->m_quotainfo->qi_bhardlimit = hard;
			mp->m_quotainfo->qi_bsoftlimit = soft;
		}
	} else {
		qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	/* Realtime block limits, same rules as regular blocks. */
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			INT_GET(ddq->d_rtb_hardlimit, ARCH_CONVERT);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT);
	if (hard == 0 || hard >= soft) {
		INT_SET(ddq->d_rtb_hardlimit, ARCH_CONVERT, hard);
		INT_SET(ddq->d_rtb_softlimit, ARCH_CONVERT, soft);
		if (id == 0) {
			mp->m_quotainfo->qi_rtbhardlimit = hard;
			mp->m_quotainfo->qi_rtbsoftlimit = soft;
		}
	} else {
		qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	/* Inode limits are plain counts; no unit conversion needed. */
	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			INT_GET(ddq->d_ino_hardlimit, ARCH_CONVERT);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT);
	if (hard == 0 || hard >= soft) {
		INT_SET(ddq->d_ino_hardlimit, ARCH_CONVERT, hard);
		INT_SET(ddq->d_ino_softlimit, ARCH_CONVERT, soft);
		if (id == 0) {
			mp->m_quotainfo->qi_ihardlimit = hard;
			mp->m_quotainfo->qi_isoftlimit = soft;
		}
	} else {
		qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
	}

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used. Ditto for the default
		 * soft and hard limit values (already done, above).
		 */
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			mp->m_quotainfo->qi_btimelimit = newlim->d_btimer;
			INT_SET(ddq->d_btimer, ARCH_CONVERT, newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			mp->m_quotainfo->qi_itimelimit = newlim->d_itimer;
			INT_SET(ddq->d_itimer, ARCH_CONVERT, newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer;
			INT_SET(ddq->d_rtbtimer, ARCH_CONVERT, newlim->d_rtbtimer);
		}
	} else /* if (XFS_IS_QUOTA_ENFORCED(mp)) */ {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);
	xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT");
	xfs_trans_commit(tp, 0, NULL);
	xfs_qm_dqprint(dqp);
	xfs_qm_dqrele(dqp);
	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));

	return (0);
}
/* s = service the command was sent to u = user the command was sent from */ void ns_group(IRC_User *s, IRC_User *u) { u_int32_t source_snid; u_int32_t snid; char *cmd; char *gname; char *nick; int memberc = 0; u_int32_t master_sgid; u_int32_t sgid; CHECK_IF_IDENTIFIED_NICK cmd = strtok(NULL, " "); gname = strtok(NULL, " "); /* base syntax validation */ if(IsNull(cmd)) send_lang(u, s, NS_GROUP_SYNTAX); else if(strcasecmp(cmd,"CREATE") == 0) { char *master; char *gdesc; char *umodes = NULL; master = strtok(NULL, " "); gdesc = strtok(NULL, ""); if(gname) /* first check if the name contains umodes */ { char *pumodes; char *eumodes; pumodes = strchr(gname,'['); if(pumodes && pumodes[0]) { *(pumodes++) = '\0'; eumodes = strchr(pumodes,']'); if(eumodes) { *eumodes = '\0'; umodes = pumodes; } } } /* syntax validation */ if(IsNull(gname) || IsNull(master)) send_lang(u, s, NS_GROUP_CREATE_SYNTAX); /* permissions validation */ else if(!is_sroot(source_snid)) send_lang(u, s, NICK_NOT_ROOT); /* check requirements */ else if((master_sgid = find_group(master)) == 0) send_lang(u, s, NS_GROUP_MASTER_NOT_FOUND, master); /* avoid duplicates */ else if((sgid = find_group(gname)) != 0) send_lang(u, s, NS_GROUP_ALREADY_EXISTS, gname); /* execute operation */ else if(group_create(gname, master_sgid, gdesc, umodes) > 0) /* report operation status */ send_lang(u, s, NS_GROUP_CREATE_OK, gname); else send_lang(u, s, UPDATE_FAIL); } else if(strcasecmp(cmd,"ADD") == 0) { u_int32_t duration = 0; time_t master_expire = 0; u_int32_t is_master_sgid; char *duration_str; nick = strtok(NULL, " "); duration_str = strtok(NULL, " "); if(duration_str) duration = time_str(duration_str); /* syntax validation */ if(IsNull(gname) || IsNull(nick)) send_lang(u, s, NS_GROUP_ADD_SYNTAX); /* check requirements */ else if((snid = nick2snid(nick)) == 0) send_lang(u, s, NO_SUCH_NICK_X, nick); else if((sgid = find_group(gname)) == 0) send_lang(u, s, NO_SUCH_GROUP_X, gname); /* privileges validation */ else 
if(group_is_full(sgid)) send_lang(u, s, NS_GROUP_IS_FULL_X); else if(((is_master_sgid = is_master(source_snid, sgid))== 0) && !is_sroot(source_snid)) send_lang(u, s, NOT_MASTER_OF_X, gname); /* avoid duplicates */ else if(sql_singlequery("SELECT t_expire FROM ns_group_users " " WHERE sgid=%d AND snid=%d", is_master_sgid, source_snid) && (master_expire = sql_field_i(0)) && duration) send_lang(u, s, NS_GROUP_CANT_DEFINE_TIME_X, gname); else if(is_member_of(snid, sgid)) send_lang(u, s, NICK_X_ALREADY_ON_X, nick, gname); /* execute operation */ else { time_t t_expire = 0; if(master_expire) t_expire = master_expire; else if(duration) t_expire = irc_CurrentTime + duration; if(add_to_group(sgid, snid, t_expire) > 0) /* report operation status */ { char *server = strchr(gname, '@'); IRC_User *user = irc_FindUser(nick); send_lang(u, s, NICK_ADDED_X_X, nick, gname); if(server) /* we have a server rule to be validated */ ++server; if(user && (!server || (strcasecmp(server,u->server->sname) == 0))) { if(user->extra[ED_GROUPS] == NULL) { user->extra[ED_GROUPS] = malloc(sizeof(darray)); array_init(user->extra[ED_GROUPS], 1, DA_INT); } array_add_int(user->extra[ED_GROUPS], sgid); } } else send_lang(u, s, UPDATE_FAIL); } } else if(strcasecmp(cmd,"DEL") == 0) { nick = strtok(NULL, " "); /* syntax validation */ if(IsNull(gname) || IsNull(nick)) send_lang(u, s, NS_GROUP_DEL_SYNTAX); /* check requirements */ else if((sgid = find_group(gname)) == 0) send_lang(u, s, NO_SUCH_GROUP_X, gname); else if((snid = nick2snid(nick)) == 0) send_lang(u, s, NO_SUCH_NICK_X, nick); /* privileges validation */ else if(!is_sroot(source_snid) && !is_master(source_snid, sgid)) send_lang(u, s, NOT_MASTER_OF_X, gname); else if(!is_member_of(snid, sgid)) send_lang(u, s, NICK_X_NOT_ON_GROUP_X, nick, gname); /* execute operation */ else if(del_from_group(sgid, snid) > 0) /* report operation status */ { IRC_User *user = irc_FindUser(nick); send_lang(u, s, NICK_DEL_X_X, nick, gname); if(user) 
array_del_int(user->extra[ED_GROUPS], sgid); } else send_lang(u, s, UPDATE_FAIL); } else if(strcasecmp(cmd,"INFO") == 0) { /* syntax validation */ if(IsNull(gname)) send_lang(u, s, NS_GROUP_INFO_SYNTAX); /* check requirements */ else if((sgid = find_group(gname)) == 0) send_lang(u, s, NO_SUCH_GROUP_X, gname); /* check privileges */ else if(!is_master(source_snid, sgid) && !is_member_of(source_snid, sgid)) send_lang(u, s, NOT_MASTER_OR_MEMBER_X, gname); else if((sgid = find_group(gname))) /* we need to get the group description */ { /* execute operation */ MYSQL_RES* res; master_sgid = 0; sql_singlequery("SELECT gdesc, master_sgid FROM ns_group WHERE sgid=%d", sgid); send_lang(u, s, NS_GROUP_INFO_X, gname); if(sql_field(0)) send_lang(u, s, NS_GROUP_INFO_DESC_X, sql_field(0)); master_sgid = sql_field_i(1); if(master_sgid != 0) { if(sql_singlequery("SELECT name FROM ns_group WHERE sgid=%d", master_sgid) > 0) { send_lang(u, s, NS_GROUP_INFO_MASTER_X, sql_field(0)); } } res = sql_query("SELECT n.nick, gm.t_expire FROM " "nickserv n, ns_group_users gm WHERE gm.sgid=%d AND n.snid=gm.snid", sgid); if(sql_next_row(res) == NULL) send_lang(u, s, NS_GROUP_EMPTY); else { do { char buf[64]; struct tm *tm; time_t t_expire = sql_field_i(1); buf[0] = '\0'; if(t_expire) { tm = localtime(&t_expire); strftime(buf, sizeof(buf), format_str(u, DATE_FORMAT), tm); send_lang(u,s, NS_GROUP_ITEM_X_X, sql_field(0), buf); } else send_lang(u,s, NS_GROUP_ITEM_X, sql_field(0)); ++memberc; } while(sql_next_row(res)); send_lang(u, s, NS_GROUP_MEMBERS_TAIL_X, memberc); } sql_free(res); } } else if(strcasecmp(cmd,"DROP") == 0) { /* syntax validation */ if(IsNull(gname)) send_lang(u, s, NS_GROUP_DROP_SYNTAX); /* privileges validation */ else if(!is_sroot(source_snid)) send_lang(u, s, NICK_NOT_ROOT); /* check requirements */ else if((sgid = find_group(gname)) == 0) send_lang(u, s, NO_SUCH_GROUP_X, gname); /* NOTE: The following sql_field( depends on previous find_group( */ else if(!sql_field(2) || 
(master_sgid = atoi(sql_field(2))) == 0) send_lang(u, s, CANT_DROP_ROOT); /* execute operation */ else if(drop_group(sgid)>0) /* report operation status */ send_lang(u, s, NS_GROUP_DROPPED_X, gname); else send_lang(u, s, UPDATE_FAIL); } else if(strcasecmp(cmd,"LIST") == 0) /* List groups */ { MYSQL_RES* res; MYSQL_ROW row; /* privileges validation */ if(!is_sroot(source_snid)) send_lang(u, s, NICK_NOT_ROOT); else { res = sql_query("SELECT name, master_sgid, gdesc FROM ns_group"); send_lang(u, s, NS_GROUP_LIST_HEADER); while((row = sql_next_row(res))) { char* mname = ""; if(row[1] && sql_singlequery("SELECT name FROM ns_group WHERE sgid=%d", atoi(row[1])) > 0) mname = sql_field(0); send_lang(u, s, NS_GROUP_LIST_X_X_X, row[0], mname, row[2] ? row[2] : ""); } send_lang(u, s, NS_GROUP_LIST_TAIL); sql_free(res); } } else if(strcasecmp(cmd,"SHOW") == 0) /* Show groups we belong to */ { /* groups count */ int gc = array_count(u->extra[ED_GROUPS]); if(gc == 0) send_lang(u, s, NO_GROUPS); else { MYSQL_RES *res; MYSQL_ROW row; char buf[64]; struct tm *tm; time_t t_expire; #if 0 int i; u_int32_t* data = array_data_int(u->extra[ED_GROUPS]); #endif send_lang(u, s, NS_GROUP_SHOW_HEADER); #if 0 for(i = 0; i < gc; ++i) { if(sql_singlequery("SELECT name,gdesc FROM ns_group WHERE sgid=%d", data[i]) > 0 ) send_lang(u, s, NS_GROUP_SHOW_X_X, sql_field(0), sql_field(1) ? sql_field(1) : ""); } #endif res = sql_query("SELECT g.name, g.gdesc, gu.t_expire FROM ns_group g, ns_group_users gu" " WHERE gu.snid=%d AND g.sgid=gu.sgid ORDER BY g.master_sgid", source_snid); while((row = sql_next_row(res))) { t_expire = sql_field_i(2); buf[0] = '\0'; if(t_expire) { tm = localtime(&t_expire); strftime(buf, sizeof(buf), format_str(u, DATE_FORMAT), tm); send_lang(u,s, NS_GROUP_SHOW_X_X_X, row[0], row[1] ? row[1] : "", buf); } else send_lang(u, s, NS_GROUP_SHOW_X_X, row[0], row[1] ? 
row[1] : ""); } send_lang(u, s, NS_GROUP_SHOW_TAIL); sql_free(res); } } else if(strcasecmp(cmd,"SET") == 0) { char *option; char *value ; option = strtok(NULL, " "); value = strtok(NULL, " "); /* syntax validation */ if(IsNull(gname) || IsNull(option)) send_lang(u, s, NS_GROUP_SET_SYNTAX); /* privileges validation */ else if(!is_sroot(source_snid)) send_lang(u, s, NICK_NOT_ROOT); /* check requirements */ else if((sgid = find_group(gname)) == 0) send_lang(u, s, NO_SUCH_GROUP_X, gname); else { if(strcasecmp(option,"AUTOMODES") == 0) STRING_SET("autoumodes", AUTOMODES_X_UNSET, AUTOMODES_X_CHANGED_TO_X) else if(strcasecmp(option,"DESC") == 0) STRING_SET("gdesc", DESC_X_UNSET, DESC_X_CHANGED_TO_X) else if(strcasecmp(option, "MAXUSERS") == 0) INT_SET("maxusers", NS_GROUP_SET_MAXUSERS_SET_X_X) else send_lang(u, s, SET_INVALID_OPTION_X, option); } }
/*
 * Compute the Bozorth match score between a probe minutiae set (pstruct)
 * and a gallery minutiae set (gstruct), given np compatible edge pairs
 * already accumulated in the global colp[] table by earlier pipeline
 * stages. Returns the match score, ZERO_MATCH_SCORE when either print
 * has too few minutiae, or QQ_OVERFLOW_SCORE on internal table overflow.
 *
 * NOTE(review): this routine operates on many module-level tables
 * (colp, qq, tq, rq, y, sc, yl, yy, rk, ct, gct, ctt, ctp, nn, mm,
 * rx, rf, cf, zz, rp, cp) shared with bz_sift()/bz_final_loop();
 * their exact semantics are defined elsewhere in the matcher and are
 * only partially inferable from this function.
 */
int bz_match_score(
	int np,
	struct xyt_struct * pstruct,
	struct xyt_struct * gstruct
	)
{
	int kx, kq;
	int ftt;
	int tot;
	int qh;
	int tp;
	int ll, jj, kk, n, t, b;
	int k, i, j, ii, z;
	int kz, l;
	int p1, p2;
	int dw, ww;
	int match_score;
	int qq_overflow = 0;
	float fi;

	/* These next 3 arrays originally declared global, but moved here */
	/* locally because they are only used herein */
	int rr[ RR_SIZE ];
	int avn[ AVN_SIZE ];
	int avv[ AVV_SIZE_1 ][ AVV_SIZE_2 ];

	/* These now externally defined in bozorth.h */
	/* extern FILE * stderr; */
	/* extern char * get_progname( void ); */
	/* extern char * get_probe_filename( void ); */
	/* extern char * get_gallery_filename( void ); */

	/* Refuse to score prints with too few minutiae to be meaningful. */
	if ( pstruct->nrows < MIN_COMPUTABLE_BOZORTH_MINUTIAE ) {
#ifndef NOVERBOSE
		if ( gstruct->nrows < MIN_COMPUTABLE_BOZORTH_MINUTIAE ) {
			if ( verbose_bozorth )
				fprintf( stderr, "%s: bz_match_score(): both probe and gallery file have too few minutiae (%d,%d) to compute a real Bozorth match score; min. is %d [p=%s; g=%s]\n",
						get_progname(), pstruct->nrows, gstruct->nrows,
						MIN_COMPUTABLE_BOZORTH_MINUTIAE, get_probe_filename(), get_gallery_filename() );
		} else {
			if ( verbose_bozorth )
				fprintf( stderr, "%s: bz_match_score(): probe file has too few minutiae (%d) to compute a real Bozorth match score; min. is %d [p=%s; g=%s]\n",
						get_progname(), pstruct->nrows,
						MIN_COMPUTABLE_BOZORTH_MINUTIAE, get_probe_filename(), get_gallery_filename() );
		}
#endif
		return ZERO_MATCH_SCORE;
	}
	if ( gstruct->nrows < MIN_COMPUTABLE_BOZORTH_MINUTIAE ) {
#ifndef NOVERBOSE
		if ( verbose_bozorth )
			fprintf( stderr, "%s: bz_match_score(): gallery file has too few minutiae (%d) to compute a real Bozorth match score; min. is %d [p=%s; g=%s]\n",
					get_progname(), gstruct->nrows,
					MIN_COMPUTABLE_BOZORTH_MINUTIAE, get_probe_filename(), get_gallery_filename() );
#endif
		return ZERO_MATCH_SCORE;
	}

	/* initialize tables to 0's */
	/* (this INT_SET is the matcher's fill-an-int-array helper, not an
	 * endian macro) */
	INT_SET( (int *) &yl, YL_SIZE_1 * YL_SIZE_2, 0 );
	INT_SET( (int *) &sc, SC_SIZE, 0 );
	INT_SET( (int *) &cp, CP_SIZE, 0 );
	INT_SET( (int *) &rp, RP_SIZE, 0 );
	INT_SET( (int *) &tq, TQ_SIZE, 0 );
	INT_SET( (int *) &rq, RQ_SIZE, 0 );
	INT_SET( (int *) &zz, ZZ_SIZE, 1000 );		/* zz[] initialized to 1000's */

	INT_SET( (int *) &avn, AVN_SIZE, 0 );		/* avn[0...4] <== 0; */

	tp = 0;
	p1 = 0;
	tot = 0;
	ftt = 0;
	kx = 0;
	match_score = 0;

	/* Main loop: try to grow a cluster of consistent edge pairs
	 * starting from each unconsumed pair k. */
	for ( k = 0; k < np - 1; k++ ) {
		/* printf( "compute(): looping with k=%d\n", k ); */

		if ( sc[k] )	/* If SC counter for current pair already incremented ... */
			continue;	/* Skip to next pair */

		i = colp[k][1];
		t = colp[k][3];

		qq[0]   = i;
		rq[t-1] = i;
		tq[i-1] = t;

		ww = 0;
		dw = 0;

		do {
			ftt++;
			tot = 0;
			qh  = 1;
			kx  = k;

			/* Sift forward through pairs sharing this pair's
			 * startpoints. */
			do {
				kz = colp[kx][2];
				l  = colp[kx][4];
				kx++;
				bz_sift( &ww, kz, &qh, l, kx, ftt, &tot, &qq_overflow );
				if ( qq_overflow ) {
					fprintf( stderr, "%s: WARNING: bz_match_score(): qq[] overflow from bz_sift() #1 [p=%s; g=%s]\n",
							get_progname(), get_probe_filename(), get_gallery_filename() );
					return QQ_OVERFLOW_SCORE;
				}

#ifndef NOVERBOSE
				if ( verbose_bozorth )
					printf( "x1 %d %d %d %d %d %d\n", kx, colp[kx][0], colp[kx][1], colp[kx][2], colp[kx][3], colp[kx][4] );
#endif

			} while ( colp[kx][3] == colp[k][3] && colp[kx][1] == colp[k][1] );
			/* While the startpoints of lookahead edge pairs are the same as the starting points of the */
			/* current pair, set KQ to lookahead edge pair index where above bz_sift() loop left off */

			kq = kx;

			/* Extend the cluster using already-linked endpoints
			 * in qq[]. */
			for ( j = 1; j < qh; j++ ) {
				for ( i = kq; i < np; i++ ) {

					for ( z = 1; z < 3; z++ ) {
						if ( z == 1 ) {
							if ( (j+1) > QQ_SIZE ) {
								fprintf( stderr, "%s: WARNING: bz_match_score(): qq[] overflow #1 in bozorth3(); j-1 is %d [p=%s; g=%s]\n",
										get_progname(), j-1, get_probe_filename(), get_gallery_filename() );
								return QQ_OVERFLOW_SCORE;
							}
							p1 = qq[j];
						} else {
							p1 = tq[p1-1];
						}
						if ( colp[i][2*z] != p1 )
							break;
					}

					if ( z == 3 ) {
						z = colp[i][1];
						l = colp[i][3];

						if ( z != colp[k][1] && l != colp[k][3] ) {
							kx = i + 1;
							bz_sift( &ww, z, &qh, l, kx, ftt, &tot, &qq_overflow );
							if ( qq_overflow ) {
								fprintf( stderr, "%s: WARNING: bz_match_score(): qq[] overflow from bz_sift() #2 [p=%s; g=%s]\n",
										get_progname(), get_probe_filename(), get_gallery_filename() );
								return QQ_OVERFLOW_SCORE;
							}
						}
					}
				} /* END for i */

				/* Done looking ahead for current j */

				/* Binary search of colp[] (via SENSE ordering)
				 * for pairs whose first endpoints match. */
				l = 1;
				t = np + 1;
				b = kq;

				while ( t - b > 1 ) {
					l = ( b + t ) / 2;

					for ( i = 1; i < 3; i++ ) {
						if ( i == 1 ) {
							if ( (j+1) > QQ_SIZE ) {
								fprintf( stderr, "%s: WARNING: bz_match_score(): qq[] overflow #2 in bozorth3(); j-1 is %d [p=%s; g=%s]\n",
										get_progname(), j-1, get_probe_filename(), get_gallery_filename() );
								return QQ_OVERFLOW_SCORE;
							}
							p1 = qq[j];
						} else {
							p1 = tq[p1-1];
						}
						p2 = colp[l-1][i*2-1];

						n = SENSE(p1,p2);

						if ( n < 0 ) {
							t = l;
							break;
						}
						if ( n > 0 ) {
							b = l;
							break;
						}
					}

					if ( n == 0 ) {
						/* Locates the head of consecutive sequence of edge pairs all having the same starting Subject and On-File edgepoints */
						while ( colp[l-2][3] == p2 && colp[l-2][1] == colp[l-1][1] )
							l--;

						kx = l - 1;

						do {
							kz = colp[kx][2];
							l  = colp[kx][4];
							kx++;
							bz_sift( &ww, kz, &qh, l, kx, ftt, &tot, &qq_overflow );
							if ( qq_overflow ) {
								fprintf( stderr, "%s: WARNING: bz_match_score(): qq[] overflow from bz_sift() #3 [p=%s; g=%s]\n",
										get_progname(), get_probe_filename(), get_gallery_filename() );
								return QQ_OVERFLOW_SCORE;
							}
						} while ( colp[kx][3] == p2 && colp[kx][1] == colp[kx-1][1] );

						break;
					} /* END if ( n == 0 ) */

				} /* END while */

			} /* END for j */

			/* Angle-consistency filtering of the accumulated group:
			 * compute an average rotation and drop outliers. */
			if ( tot >= MSTR ) {
				jj = 0;
				kk = 0;
				n  = 0;
				l  = 0;

				for ( i = 0; i < tot; i++ ) {
					int colp_value = colp[ y[i]-1 ][0];
					if ( colp_value < 0 ) {
						kk += colp_value;
						n++;
					} else {
						jj += colp_value;
						l++;
					}
				}

				if ( n == 0 ) {
					n = 1;
				} else if ( l == 0 ) {
					l = 1;
				}

				fi = (float) jj / (float) l - (float) kk / (float) n;

				if ( fi > 180.0F ) {
					fi = ( jj + kk + n * 360 ) / (float) tot;
					if ( fi > 180.0F )
						fi -= 360.0F;
				} else {
					fi = ( jj + kk ) / (float) tot;
				}

				jj = ROUND(fi);
				if ( jj <= -180 )
					jj += 360;

				kk = 0;
				for ( i = 0; i < tot; i++ ) {
					int diff = colp[ y[i]-1 ][0] - jj;
					j = SQUARED( diff );
					if ( j > TXS && j < CTXS )
						kk++;
					else
						y[i-kk] = y[i];
				} /* END FOR i */

				tot -= kk;	/* Adjust the total edge pairs TOT based on # of edge pairs skipped */

			} /* END if ( tot >= MSTR ) */

			if ( tot < MSTR ) {
				/* Group too small after filtering: charge its
				 * pairs in sc[] and retry. */
				for ( i = tot-1 ; i >= 0; i-- ) {
					int idx = y[i] - 1;
					if ( rk[idx] == 0 ) {
						sc[idx] = -1;
					} else {
						sc[idx] = rk[idx];
					}
				}
				ftt--;

			} else {	/* tot >= MSTR */
					/* Otherwise size of TOT group (seq. of TOT indices stored in Y) is large enough to analyze */

				int pa = 0;
				int pb = 0;
				int pc = 0;
				int pd = 0;

				for ( i = 0; i < tot; i++ ) {
					int idx = y[i] - 1;
					for ( ii = 1; ii < 4; ii++ ) {
						/* kk selects columns 0, 1, 3 of
						 * colp for ii = 1, 2, 3. */
						kk = ( SQUARED(ii) - ii + 2 ) / 2 - 1;
						jj = colp[idx][kk];
						switch ( ii ) {
						  case 1:
							if ( colp[idx][0] < 0 ) {
								pd += colp[idx][0];
								pb++;
							} else {
								pa += colp[idx][0];
								pc++;
							}
							break;
						  case 2:
							avn[ii-1] += pstruct->xcol[jj-1];
							avn[ii]   += pstruct->ycol[jj-1];
							break;
						  default:
							avn[ii]   += gstruct->xcol[jj-1];
							avn[ii+1] += gstruct->ycol[jj-1];
							break;
						} /* switch */
					} /* END for ii = [1..3] */

					/* Insert both endpoints into the sorted
					 * yy[][ii][tp] lists (binary search). */
					for ( ii = 0; ii < 2; ii++ ) {
						n = -1;
						l = 1;

						for ( jj = 1; jj < 3; jj++ ) {
							p1 = colp[idx][ 2 * ii + jj ];
							b = 0;
							t = yl[ii][tp] + 1;

							while ( t - b > 1 ) {
								l  = ( b + t ) / 2;
								p2 = yy[l-1][ii][tp];
								n  = SENSE(p1,p2);

								if ( n < 0 ) {
									t = l;
								} else {
									if ( n > 0 ) {
										b = l;
									} else {
										break;
									}
								}
							} /* END WHILE */

							if ( n != 0 ) {
								if ( n == 1 )
									++l;

								for ( kk = yl[ii][tp]; kk >= l; --kk ) {
									yy[kk][ii][tp] = yy[kk-1][ii][tp];
								}
								++yl[ii][tp];
								yy[l-1][ii][tp] = p1;
							} /* END if ( n != 0 ) */

							/* Otherwise, edgepoint already stored in YY */

						} /* END FOR jj in [1,2] */
					} /* END FOR ii in [0,1] */
				} /* END FOR i */

				if ( pb == 0 ) {
					pb = 1;
				} else if ( pc == 0 ) {
					pc = 1;
				}

				fi = (float) pa / (float) pc - (float) pd / (float) pb;
				if ( fi > 180.0F ) {
					fi = ( pa + pd + pb * 360 ) / (float) tot;
					if ( fi > 180.0F )
						fi -= 360.0F;
				} else {
					fi = ( pa + pd ) / (float) tot;
				}

				pa = ROUND(fi);
				if ( pa <= -180 )
					pa += 360;

				/* Record this group's average angle and
				 * centroid components. */
				avv[tp][0] = pa;

				for ( ii = 1; ii < 5; ii++ ) {
					avv[tp][ii] = avn[ii] / tot;
					avn[ii] = 0;
				}

				ct[tp]  = tot;
				gct[tp] = tot;

				if ( tot > match_score )	/* If current TOT > match_score ... */
					match_score = tot;	/* Keep track of max TOT in match_score */

				ctt[tp]    = 0;		/* Init CTT[TP] to 0 */
				ctp[tp][0] = tp;	/* Store TP into CTP */

				/* Compare this group against all prior groups;
				 * compatible, non-overlapping groups pool their
				 * counts into gct[]. */
				for ( ii = 0; ii < tp; ii++ ) {
					int found;
					int diff;

					int * avv_tp_ptr = &avv[tp][0];
					int * avv_ii_ptr = &avv[ii][0];
					diff = *avv_tp_ptr++ - *avv_ii_ptr++;
					j = SQUARED( diff );

					if ( j > TXS && j < CTXS )
						continue;

					ll = *avv_tp_ptr++ - *avv_ii_ptr++;
					jj = *avv_tp_ptr++ - *avv_ii_ptr++;
					kk = *avv_tp_ptr++ - *avv_ii_ptr++;
					j  = *avv_tp_ptr++ - *avv_ii_ptr++;

					{
					float tt, ai, dz;

					tt = (float) (SQUARED(ll) + SQUARED(jj));
					ai = (float) (SQUARED(j) + SQUARED(kk));
					fi = ( 2.0F * TK ) * ( tt + ai );
					dz = tt - ai;
					if ( SQUARED(dz) > SQUARED(fi) )
						continue;
					}

					if ( ll ) {
						if ( m1_xyt )
							fi = ( 180.0F / PI_SINGLE ) * atanf( (float) -jj / (float) ll );
						else
							fi = ( 180.0F / PI_SINGLE ) * atanf( (float) jj / (float) ll );
						if ( fi < 0.0F ) {
							if ( ll < 0 )
								fi += 180.5F;
							else
								fi -= 0.5F;
						} else {
							if ( ll < 0 )
								fi -= 180.5F;
							else
								fi += 0.5F;
						}
						jj = (int) fi;
						if ( jj <= -180 )
							jj += 360;
					} else {
						if ( m1_xyt ) {
							if ( jj > 0 )
								jj = -90;
							else
								jj = 90;
						} else {
							if ( jj > 0 )
								jj = 90;
							else
								jj = -90;
						}
					}

					if ( kk ) {
						if ( m1_xyt )
							fi = ( 180.0F / PI_SINGLE ) * atanf( (float) -j / (float) kk );
						else
							fi = ( 180.0F / PI_SINGLE ) * atanf( (float) j / (float) kk );
						if ( fi < 0.0F ) {
							if ( kk < 0 )
								fi += 180.5F;
							else
								fi -= 0.5F;
						} else {
							if ( kk < 0 )
								fi -= 180.5F;
							else
								fi += 0.5F;
						}
						j = (int) fi;
						if ( j <= -180 )
							j += 360;
					} else {
						if ( m1_xyt ) {
							if ( j > 0 )
								j = -90;
							else
								j = 90;
						} else {
							if ( j > 0 )
								j = 90;
							else
								j = -90;
						}
					}

					pa = 0;
					pb = 0;
					pc = 0;
					pd = 0;

					if ( avv[tp][0] < 0 ) {
						pd += avv[tp][0];
						pb++;
					} else {
						pa += avv[tp][0];
						pc++;
					}

					if ( avv[ii][0] < 0 ) {
						pd += avv[ii][0];
						pb++;
					} else {
						pa += avv[ii][0];
						pc++;
					}

					if ( pb == 0 ) {
						pb = 1;
					} else if ( pc == 0 ) {
						pc = 1;
					}

					fi = (float) pa / (float) pc - (float) pd / (float) pb;
					if ( fi > 180.0F ) {
						fi = ( pa + pd + pb * 360 ) / 2.0F;
						if ( fi > 180.0F )
							fi -= 360.0F;
					} else {
						fi = ( pa + pd ) / 2.0F;
					}

					pb = ROUND(fi);
					if ( pb <= -180 )
						pb += 360;

					pa = jj - j;
					pa = IANGLE180(pa);
					kk = SQUARED(pb-pa);

					/* Was: if ( SQUARED(kk) > TXS && kk < CTXS ) : assume typo */
					if ( kk > TXS && kk < CTXS )
						continue;

					/* Groups sharing an endpoint must not
					 * both be counted: merge-sort style scan
					 * of the two sorted endpoint lists. */
					found = 0;
					for ( kk = 0; kk < 2; kk++ ) {
						jj = 0;
						ll = 0;
						do {
							while ( yy[jj][kk][ii] < yy[ll][kk][tp] && jj < yl[kk][ii] ) {
								jj++;
							}
							while ( yy[jj][kk][ii] > yy[ll][kk][tp] && ll < yl[kk][tp] ) {
								ll++;
							}
							if ( yy[jj][kk][ii] == yy[ll][kk][tp] && jj < yl[kk][ii] && ll < yl[kk][tp] ) {
								found = 1;
								break;
							}
						} while ( jj < yl[kk][ii] && ll < yl[kk][tp] );
						if ( found )
							break;
					} /* END for kk */

					if ( ! found ) {	/* If we didn't find what we were searching for ... */
						gct[ii] += ct[tp];
						if ( gct[ii] > match_score )
							match_score = gct[ii];
						++ctt[ii];
						ctp[ii][ctt[ii]] = tp;
					}

				} /* END for ii in [0,TP-1] prior TP group */

				tp++;			/* Bump TP counter */

			} /* END ELSE if ( tot == MSTR ) */

			if ( qh > QQ_SIZE ) {
				fprintf( stderr, "%s: WARNING: bz_match_score(): qq[] overflow #3 in bozorth3(); qh-1 is %d [p=%s; g=%s]\n",
						get_progname(), qh-1, get_probe_filename(), get_gallery_filename() );
				return QQ_OVERFLOW_SCORE;
			}

			/* Tear down the linkage tables built for this group. */
			for ( i = qh - 1; i > 0; i-- ) {
				n = qq[i] - 1;
				if ( ( tq[n] - 1 ) >= 0 ) {
					rq[tq[n]-1] = 0;
					tq[n]       = 0;
					zz[n]       = 1000;
				}
			}

			for ( i = dw - 1; i >= 0; i-- ) {
				n = rr[i] - 1;
				if ( tq[n] ) {
					rq[tq[n]-1] = 0;
					tq[n]       = 0;
				}
			}

			/* Step to the next combination of ambiguous endpoint
			 * assignments (odometer over nn[]/mm[] using rf/cf). */
			i = 0;
			j = ww - 1;
			while ( i >= 0 && j >= 0 ) {
				if ( nn[j] < mm[j] ) {
					++nn[j];

					for ( i = ww - 1; i >= 0; i-- ) {
						int rt = rx[i];
						if ( rt < 0 ) {
							rt = - rt;
							rt--;
							z = rf[i][nn[i]-1]-1;

							if (( tq[z] != (rt+1) && tq[z] ) || ( rq[rt] != (z+1) && rq[rt] ))
								break;

							tq[z]  = rt+1;
							rq[rt] = z+1;
							rr[i]  = z+1;
						} else {
							rt--;
							z = cf[i][nn[i]-1]-1;

							if (( tq[rt] != (z+1) && tq[rt] ) || ( rq[z] != (rt+1) && rq[z] ))
								break;

							tq[rt] = z+1;
							rq[z]  = rt+1;
							rr[i]  = rt+1;
						}
					} /* END for i */

					if ( i >= 0 ) {
						for ( z = i + 1; z < ww; z++) {
							n = rr[z] - 1;
							if ( tq[n] - 1 >= 0 ) {
								rq[tq[n]-1] = 0;
								tq[n]       = 0;
							}
						}
						j = ww - 1;
					}

				} else {
					nn[j] = 1;
					j--;
				}
			}

			/* Hard cap on the number of groups analyzed. */
			if ( tp > 1999 )
				break;

			dw = ww;

		} while ( j >= 0 ); /* END while endpoint group remain ... */

		if ( tp > 1999 )
			break;

		/* Final cleanup for this starting pair k. */
		n = qq[0] - 1;
		if ( tq[n] - 1 >= 0 ) {
			rq[tq[n]-1] = 0;
			tq[n]       = 0;
		}

		for ( i = ww-1; i >= 0; i-- ) {
			n = rx[i];
			if ( n < 0 ) {
				n = - n;
				rp[n-1] = 0;
			} else {
				cp[n-1] = 0;
			}
		}

	} /* END FOR each edge pair */

	/* Below the "more scrutiny" threshold the raw score stands;
	 * otherwise refine it over the recorded groups. */
	if ( match_score < MMSTR ) {
		return match_score;
	}

	match_score = bz_final_loop( tp );
	return match_score;
}
/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Return 0 for success, else error code.
 *
 * On a successful return, *alloc is 1 if a chunk of inodes was actually
 * allocated and 0 if the AG had no space (which is not an error here;
 * the caller is expected to try another AG).
 */
STATIC int				/* error code or 0 */
xfs_ialloc_ag_alloc(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*agbp,		/* alloc group buffer */
	int		*alloc)		/* out: 1 if inodes allocated, else 0 */
{
	xfs_agi_t	*agi;		/* allocation group header */
	xfs_alloc_arg_t	args;		/* allocation argument structure */
	int		blks_per_cluster;  /* fs blocks per inode cluster */
	xfs_btree_cur_t	*cur;		/* inode btree cursor */
	xfs_daddr_t	d;		/* disk addr of buffer */
	int		error;
	xfs_buf_t	*fbuf;		/* new free inodes' buffer */
	xfs_dinode_t	*free;		/* new free inode structure */
	int		i;		/* inode counter */
	int		j;		/* block counter */
	int		nbufs;		/* num bufs of new inodes */
	xfs_agino_t	newino;		/* new first inode's number */
	xfs_agino_t	newlen;		/* new number of inodes */
	int		ninodes;	/* num inodes per buf */
	xfs_agino_t	thisino;	/* current inode number, for loop */
	int		version;	/* inode version number to use */
	int		isaligned = 0;	/* inode allocation at stripe unit */
					/* boundary */

	args.tp = tp;
	args.mp = tp->t_mountp;

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = XFS_IALLOC_INODES(args.mp);
	/*
	 * Refuse to grow past the filesystem-wide maximum inode count,
	 * if one is configured (m_maxicount != 0).
	 */
	if (args.mp->m_maxicount &&
	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
		return XFS_ERROR(ENOSPC);
	/* Inode chunks are always allocated as one fixed-size extent. */
	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
	/*
	 * First try to allocate inodes contiguous with the last-allocated
	 * chunk of inodes.  If the filesystem is striped, this will fill
	 * an entire stripe unit with inodes.
	 */
	agi = XFS_BUF_TO_AGI(agbp);
	newino = be32_to_cpu(agi->agi_newino);
	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
			XFS_IALLOC_BLOCKS(args.mp);
	if (likely(newino != NULLAGINO &&
		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
		args.fsbno = XFS_AGB_TO_FSB(args.mp,
				be32_to_cpu(agi->agi_seqno), args.agbno);
		/* THIS_BNO: succeed only at exactly this block, or fail. */
		args.type = XFS_ALLOCTYPE_THIS_BNO;
		args.mod = args.total = args.wasdel = args.isfl =
			args.userdata = args.minalignslop = 0;
		args.prod = 1;
		args.alignment = 1;
		/*
		 * Allow space for the inode btree to split.
		 */
		args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	} else
		args.fsbno = NULLFSBLOCK;

	if (unlikely(args.fsbno == NULLFSBLOCK)) {
		/*
		 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
		 * If the cluster size is smaller than a filesystem block
		 * then we're doing I/O for inodes in filesystem block size
		 * pieces, so don't need alignment anyway.
		 */
		isaligned = 0;
		if (args.mp->m_sinoalign) {
			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
			args.alignment = args.mp->m_dalign;
			isaligned = 1;
		} else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
			   args.mp->m_sb.sb_inoalignmt >=
			   XFS_B_TO_FSBT(args.mp,
				XFS_INODE_CLUSTER_SIZE(args.mp)))
				args.alignment = args.mp->m_sb.sb_inoalignmt;
		else
			args.alignment = 1;
		/*
		 * Need to figure out where to allocate the inode blocks.
		 * Ideally they should be spaced out through the a.g.
		 * For now, just allocate blocks up front.
		 */
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp,
				be32_to_cpu(agi->agi_seqno), args.agbno);
		/*
		 * Allocate a fixed-size extent of inodes.
		 */
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.mod = args.total = args.wasdel = args.isfl =
			args.userdata = args.minalignslop = 0;
		args.prod = 1;
		/*
		 * Allow space for the inode btree to split.
		 */
		args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/*
	 * If stripe alignment is turned on, then try again with cluster
	 * alignment.
	 */
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp,
				be32_to_cpu(agi->agi_seqno), args.agbno);
		if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
			args.mp->m_sb.sb_inoalignmt >=
			XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
				args.alignment = args.mp->m_sb.sb_inoalignmt;
		else
			args.alignment = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/* Out of space in this AG: not an error, just report no allocation. */
	if (args.fsbno == NULLFSBLOCK) {
		*alloc = 0;
		return 0;
	}
	ASSERT(args.len == args.minlen);
	/*
	 * Convert the results.
	 */
	newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
	/*
	 * Loop over the new block(s), filling in the inodes.
	 * For small block sizes, manipulate the inodes in buffers
	 * which are multiples of the blocks size.
	 */
	if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) {
		blks_per_cluster = 1;
		nbufs = (int)args.len;
		ninodes = args.mp->m_sb.sb_inopblock;
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) /
				   args.mp->m_sb.sb_blocksize;
		nbufs = (int)args.len / blks_per_cluster;
		ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock;
	}
	/*
	 * Figure out what version number to use in the inodes we create.
	 * If the superblock version has caught up to the one that supports
	 * the new inode format, then use the new inode version.  Otherwise
	 * use the old version so that old kernels will continue to be
	 * able to use the file system.
	 */
	if (XFS_SB_VERSION_HASNLINK(&args.mp->m_sb))
		version = XFS_DINODE_VERSION_2;
	else
		version = XFS_DINODE_VERSION_1;

	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
		 */
		d = XFS_AGB_TO_DADDR(args.mp, be32_to_cpu(agi->agi_seqno),
				     args.agbno + (j * blks_per_cluster));
		fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d,
					 args.mp->m_bsize * blks_per_cluster,
					 XFS_BUF_LOCK);
		ASSERT(fbuf);
		ASSERT(!XFS_BUF_GETERROR(fbuf));
		/*
		 * Set initial values for the inodes in this buffer:
		 * zero the whole cluster, then stamp each inode's magic,
		 * version, and unlinked-list terminator, logging each one.
		 */
		xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);
		for (i = 0; i < ninodes; i++) {
			free = XFS_MAKE_IPTR(args.mp, fbuf, i);
			INT_SET(free->di_core.di_magic, ARCH_CONVERT,
				XFS_DINODE_MAGIC);
			INT_SET(free->di_core.di_version, ARCH_CONVERT,
				version);
			INT_SET(free->di_next_unlinked, ARCH_CONVERT,
				NULLAGINO);
			xfs_ialloc_log_di(tp, fbuf, i,
				XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED);
		}
		xfs_trans_inode_alloc_buf(tp, fbuf);
	}
	/* Account the new inodes in the AG header and per-AG counters. */
	be32_add(&agi->agi_count, newlen);
	be32_add(&agi->agi_freecount, newlen);
	down_read(&args.mp->m_peraglock);
	args.mp->m_perag[be32_to_cpu(agi->agi_seqno)].pagi_freecount += newlen;
	up_read(&args.mp->m_peraglock);
	agi->agi_newino = cpu_to_be32(newino);
	/*
	 * Insert records describing the new inode chunk into the btree.
	 */
	cur = xfs_btree_init_cursor(args.mp, tp, agbp,
			be32_to_cpu(agi->agi_seqno),
			XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
	for (thisino = newino;
	     thisino < newino + newlen;
	     thisino += XFS_INODES_PER_CHUNK) {
		/* Each record must not already exist (i == 0 from lookup). */
		if ((error = xfs_inobt_lookup_eq(cur, thisino,
				XFS_INODES_PER_CHUNK, XFS_INOBT_ALL_FREE,
				&i))) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 0);
		if ((error = xfs_inobt_insert(cur, &i))) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 1);
	}
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	/*
	 * Log allocation group header fields
	 */
	xfs_ialloc_log_agi(tp, agbp,
		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
	/*
	 * Modify/log superblock values for inode count and inode free count.
	 */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
	*alloc = 1;
	return 0;
}
/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off).
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.
 *
 * For each of the three resources (blocks, inodes, realtime blocks) the
 * pattern is the same: if no timer is running and either soft or hard
 * limit is exceeded, arm the timer at now + the per-mount grace period;
 * if a timer is running and usage has dropped back below both limits
 * (or the limits are zero, i.e. unset), clear it.
 *
 * NOTE(review): the caller is presumably expected to hold the dquot
 * locked while we modify it here (an earlier copy of this routine in
 * this file says as much) -- confirm against callers.
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	/* id 0 (root) has no real limits; callers must not pass it here. */
	ASSERT(!INT_ISZERO(d->d_id, ARCH_CONVERT));

#ifdef QUOTADEBUG
	/* Sanity: a soft limit must never exceed its hard limit. */
	if (INT_GET(d->d_blk_hardlimit, ARCH_CONVERT))
		ASSERT(INT_GET(d->d_blk_softlimit, ARCH_CONVERT) <=
			INT_GET(d->d_blk_hardlimit, ARCH_CONVERT));
	if (INT_GET(d->d_ino_hardlimit, ARCH_CONVERT))
		ASSERT(INT_GET(d->d_ino_softlimit, ARCH_CONVERT) <=
			INT_GET(d->d_ino_hardlimit, ARCH_CONVERT));
	if (INT_GET(d->d_rtb_hardlimit, ARCH_CONVERT))
		ASSERT(INT_GET(d->d_rtb_softlimit, ARCH_CONVERT) <=
			INT_GET(d->d_rtb_hardlimit, ARCH_CONVERT));
#endif
	/* Block-count timer. */
	if (INT_ISZERO(d->d_btimer, ARCH_CONVERT)) {
		if ((INT_GET(d->d_blk_softlimit, ARCH_CONVERT) &&
		    (INT_GET(d->d_bcount, ARCH_CONVERT) >=
			INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) ||
		    (INT_GET(d->d_blk_hardlimit, ARCH_CONVERT) &&
		    (INT_GET(d->d_bcount, ARCH_CONVERT) >=
			INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)))) {
			INT_SET(d->d_btimer, ARCH_CONVERT,
				get_seconds() + XFS_QI_BTIMELIMIT(mp));
		}
	} else {
		if ((INT_ISZERO(d->d_blk_softlimit, ARCH_CONVERT) ||
		    (INT_GET(d->d_bcount, ARCH_CONVERT) <
			INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) &&
		    (INT_ISZERO(d->d_blk_hardlimit, ARCH_CONVERT) ||
		    (INT_GET(d->d_bcount, ARCH_CONVERT) <
			INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)))) {
			INT_ZERO(d->d_btimer, ARCH_CONVERT);
		}
	}

	/* Inode-count timer: same logic as above, on d_icount. */
	if (INT_ISZERO(d->d_itimer, ARCH_CONVERT)) {
		if ((INT_GET(d->d_ino_softlimit, ARCH_CONVERT) &&
		    (INT_GET(d->d_icount, ARCH_CONVERT) >=
			INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) ||
		    (INT_GET(d->d_ino_hardlimit, ARCH_CONVERT) &&
		    (INT_GET(d->d_icount, ARCH_CONVERT) >=
			INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)))) {
			INT_SET(d->d_itimer, ARCH_CONVERT,
				get_seconds() + XFS_QI_ITIMELIMIT(mp));
		}
	} else {
		if ((INT_ISZERO(d->d_ino_softlimit, ARCH_CONVERT) ||
		    (INT_GET(d->d_icount, ARCH_CONVERT) <
			INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) &&
		    (INT_ISZERO(d->d_ino_hardlimit, ARCH_CONVERT) ||
		    (INT_GET(d->d_icount, ARCH_CONVERT) <
			INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)))) {
			INT_ZERO(d->d_itimer, ARCH_CONVERT);
		}
	}

	/* Realtime-block timer: same logic, on d_rtbcount. */
	if (INT_ISZERO(d->d_rtbtimer, ARCH_CONVERT)) {
		if ((INT_GET(d->d_rtb_softlimit, ARCH_CONVERT) &&
		    (INT_GET(d->d_rtbcount, ARCH_CONVERT) >=
			INT_GET(d->d_rtb_softlimit, ARCH_CONVERT))) ||
		    (INT_GET(d->d_rtb_hardlimit, ARCH_CONVERT) &&
		    (INT_GET(d->d_rtbcount, ARCH_CONVERT) >=
			INT_GET(d->d_rtb_hardlimit, ARCH_CONVERT)))) {
			INT_SET(d->d_rtbtimer, ARCH_CONVERT,
				get_seconds() + XFS_QI_RTBTIMELIMIT(mp));
		}
	} else {
		if ((INT_ISZERO(d->d_rtb_softlimit, ARCH_CONVERT) ||
		    (INT_GET(d->d_rtbcount, ARCH_CONVERT) <
			INT_GET(d->d_rtb_softlimit, ARCH_CONVERT))) &&
		    (INT_ISZERO(d->d_rtb_hardlimit, ARCH_CONVERT) ||
		    (INT_GET(d->d_rtbcount, ARCH_CONVERT) <
			INT_GET(d->d_rtb_hardlimit, ARCH_CONVERT)))) {
			INT_ZERO(d->d_rtbtimer, ARCH_CONVERT);
		}
	}
}
/*
 * Writes a modified inode's changes out to the inode's on disk home.
 * Originally based on xfs_iflush_int() from xfs_inode.c in the kernel.
 *
 * Translates the in-core inode (ip->i_d) into the on-disk dinode located
 * inside buffer bp, handling the v1/v2 inode format conversion, then
 * flushes the data fork and (if present) the attribute fork.
 *
 * Returns 0 on success, or EFSCORRUPTED if flushing the data fork
 * detects corruption.  The buffer itself is not written here; the
 * caller owns bp.
 */
int
libxfs_iflush_int(xfs_inode_t *ip, xfs_buf_t *bp)
{
	xfs_inode_log_item_t	*iip;
	xfs_dinode_t		*dip;	/* inode's location within bp */
	xfs_mount_t		*mp;

	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
	/* A btree-format fork only makes sense once extents overflow it. */
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
		ip->i_d.di_nextents > ip->i_df.if_ext_max);

	iip = ip->i_itemp;
	mp = ip->i_mount;

	/* set *dip = inode's place in the buffer */
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset);

#ifdef DEBUG
	/* Sanity-check the in-core inode before committing it to disk. */
	ASSERT(ip->i_d.di_magic == XFS_DINODE_MAGIC);
	if ((ip->i_d.di_mode & IFMT) == IFREG) {
		ASSERT( (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS) ||
			(ip->i_d.di_format == XFS_DINODE_FMT_BTREE) );
	} else if ((ip->i_d.di_mode & IFMT) == IFDIR) {
		ASSERT( (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS) ||
			(ip->i_d.di_format == XFS_DINODE_FMT_BTREE) ||
			(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) );
	}
	ASSERT(ip->i_d.di_nextents+ip->i_d.di_anextents <= ip->i_d.di_nblocks);
	ASSERT(ip->i_d.di_forkoff <= mp->m_sb.sb_inodesize);
#endif
	/*
	 * Copy the dirty parts of the inode into the on-disk
	 * inode.  We always copy out the core of the inode,
	 * because if the inode is dirty at all the core must
	 * be.
	 */
	xfs_xlate_dinode_core((xfs_caddr_t)&(dip->di_core), &(ip->i_d),
		-1, ARCH_CONVERT);

	/*
	 * If this is really an old format inode and the superblock version
	 * has not been updated to support only new format inodes, then
	 * convert back to the old inode format.  If the superblock version
	 * has been updated, then make the conversion permanent.
	 */
	ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
	       XFS_SB_VERSION_HASNLINK(&mp->m_sb));
	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
			/*
			 * Convert it back: the v1 on-disk link count
			 * (di_onlink) is narrower, hence the MAXLINK_1 check.
			 */
			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
			INT_SET(dip->di_core.di_onlink, ARCH_CONVERT,
				ip->i_d.di_nlink);
		} else {
			/*
			 * The superblock version has already been bumped,
			 * so just make the conversion to the new inode
			 * format permanent.
			 */
			ip->i_d.di_version = XFS_DINODE_VERSION_2;
			INT_SET(dip->di_core.di_version, ARCH_CONVERT,
				XFS_DINODE_VERSION_2);
			ip->i_d.di_onlink = 0;
			INT_ZERO(dip->di_core.di_onlink, ARCH_CONVERT);
			bzero(&(ip->i_d.di_pad[0]), sizeof(ip->i_d.di_pad));
			bzero(&(dip->di_core.di_pad[0]),
				sizeof(dip->di_core.di_pad));
			ASSERT(ip->i_d.di_projid == 0);
		}
	}

	if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED)
		return EFSCORRUPTED;

	if (XFS_IFORK_Q(ip)) {
		/* The only error from xfs_iflush_fork is on the data fork. */
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
	}

	return 0;
}