/*
 * Return the next SLASH2 FID to use.  Note that from ZFS point of view,
 * it is perfectly okay that we use the same SLASH2 FID to refer to
 * different files/directories.  However, doing so can confuse our
 * clients (think identity theft).  So we must make sure that we never
 * reuse a SLASH2 FID, even after a crash.
 *
 * The siteid has already been baked into the initial cursor file.
 *
 * @fidp: value-result pointer filled in with the allocated FID.
 * Returns 0 on success or ENOSPC when the inode-number space of the
 * current cycle is exhausted (sysadmin must bump the cycle bits).
 */
int
slm_get_next_slashfid(slfid_t *fidp)
{
	uint64_t newfid;
	int rc = 0;

	spinlock(&slm_fid_lock);
	/*
	 * Exhaustion should never happen in practice.  Refuse the
	 * allocation loudly so the sysadmin knows to bump the cycle
	 * bits while there is still room in them; silently wrapping
	 * would hand out duplicate FIDs.
	 */
	if (FID_GET_INUM(slm_next_fid) >= FID_MAX_INUM) {
		psclog_warnx("max FID "SLPRI_FID" reached, manual "
		    "intervention needed (bump the cycle bits)",
		    slm_next_fid);
		rc = ENOSPC;
	} else
		newfid = slm_next_fid++;
	freelock(&slm_fid_lock);

	if (rc)
		return (rc);

	psclog_diag("most recently allocated FID: "SLPRI_FID, newfid);
	*fidp = newfid;
	return (0);
}
/*
 * Handle a LOOKUP RPC: resolve a name within a parent directory.
 * NOTE(review): the function body continues beyond this excerpt (the
 * 'out' label targeted by PFL_GOTOERR/goto below is not visible here).
 *
 * @rq: RPC request; srm_lookup_req/rep are unpacked from it.
 */
int slm_rmc_handle_lookup(struct pscrpc_request *rq) {
	struct fidc_membh *p = NULL;	/* parent dir handle; released at 'out' */
	struct srm_lookup_req *mq;
	struct srm_lookup_rep *mp;
	int vfsid;

	SL_RSX_ALLOCREP(rq, mq, mp);
	/* Map the parent FID to its backing VFS before touching it. */
	mp->rc = slfid_to_vfsid(mq->pfg.fg_fid, &vfsid);
	if (mp->rc)
		PFL_GOTOERR(out, mp->rc);
	mp->rc = -slm_fcmh_get(&mq->pfg, &p);
	if (mp->rc)
		PFL_GOTOERR(out, mp->rc);

	/* Defensively NUL-terminate the client-supplied name. */
	mq->name[sizeof(mq->name) - 1] = '\0';
	psclog_diag("lookup: pfid="SLPRI_FID" name=%s",
	    fcmh_2_mfid(p), mq->name);

	/* The internal metadata directory is not exposed to clients. */
	if (fcmh_2_mfid(p) == SLFID_ROOT &&
	    strcmp(mq->name, SL_RPATH_META_DIR) == 0)
		PFL_GOTOERR(out, mp->rc = -EINVAL);

	/*
	 * Global mount: names directly under the root are site names;
	 * resolve them to per-site root FIDs instead of real entries.
	 */
	if (mq->pfg.fg_fid == SLFID_ROOT && use_global_mount) {
		uint64_t fid;
		struct sl_site *site;

		mp->rc = -ENOENT;
		CONF_LOCK();
		CONF_FOREACH_SITE(site) {
			if (strcmp(mq->name, site->site_name) != 0)
				continue;
			fid = SLFID_ROOT;
			FID_SET_SITEID(fid, site->site_id);
			mp->xattrsize = 0;
			mp->attr.sst_fg.fg_fid = fid;
			/* presumably a fixed generation for synthetic site roots — TODO confirm */
			mp->attr.sst_fg.fg_gen = 2;
			slm_root_attributes(&mp->attr);
			mp->rc = 0;
			break;
		}
		CONF_ULOCK();
		goto out;
	}
int slm_rmc_handle_getattr(struct pscrpc_request *rq) { const struct srm_getattr_req *mq; struct srm_getattr_rep *mp; struct fidc_membh *f; int vfsid; SL_RSX_ALLOCREP(rq, mq, mp); psclog_diag("pfid="SLPRI_FID, mq->fg.fg_fid); if (mq->fg.fg_fid == SLFID_ROOT && use_global_mount) { mp->attr.sst_fg.fg_fid = SLFID_ROOT; mp->attr.sst_fg.fg_gen = FGEN_ANY-1; slm_root_attributes(&mp->attr); return (0); } mp->rc = -slm_fcmh_get(&mq->fg, &f); if (mp->rc) PFL_GOTOERR(out, mp->rc); mp->rc = slfid_to_vfsid(mq->fg.fg_fid, &vfsid); if (mp->rc) PFL_GOTOERR(out, mp->rc); mp->xattrsize = mdsio_hasxattrs(vfsid, &rootcreds, fcmh_2_mfid(f)); FCMH_LOCK(f); mp->attr = f->fcmh_sstb; out: if (f) fcmh_op_done(f); return (0); }
/* * Adjust the bandwidth estimate between two IONs. * @src: source resm. * @dst: destination resm. * @amt: adjustment amount in bytes. */ int resmpair_bw_adj(struct sl_resm *src, struct sl_resm *dst, int64_t amt, int rc) { int ret = 1; struct resprof_mds_info *r_min, *r_max; struct rpmi_ios *is, *id; int64_t src_total, dst_total; int64_t cap = (int64_t)slm_upsch_bandwidth; /* sort by addr to avoid deadlock */ r_min = MIN(res2rpmi(src->resm_res), res2rpmi(dst->resm_res)); r_max = MAX(res2rpmi(src->resm_res), res2rpmi(dst->resm_res)); RPMI_LOCK(r_min); RPMI_LOCK(r_max); is = res2rpmi_ios(src->resm_res); id = res2rpmi_ios(dst->resm_res); psc_assert(amt); /* reserve */ if (amt > 0) { if (cap) { src_total = is->si_repl_ingress_pending + is->si_repl_egress_pending + amt; dst_total = is->si_repl_ingress_pending + is->si_repl_egress_pending + amt; if ((src_total > cap * BW_UNITSZ) || dst_total > cap * BW_UNITSZ) { ret = 0; goto out; } } is->si_repl_egress_pending += amt; id->si_repl_ingress_pending += amt; psclog_diag("adjust bandwidth; src=%s dst=%s amt=%"PRId64, src->resm_name, dst->resm_name, amt); } /* unreserve */ if (amt < 0) { is->si_repl_egress_pending += amt; id->si_repl_ingress_pending += amt; psc_assert(is->si_repl_egress_pending >= 0); psc_assert(id->si_repl_ingress_pending >= 0); if (!rc) { is->si_repl_egress_aggr += -amt; id->si_repl_ingress_aggr += -amt; } /* * We released some bandwidth; wake anyone waiting for * some. */ #if 0 CSVC_WAKE(src->resm_csvc); CSVC_WAKE(dst->resm_csvc); #endif } out: RPMI_ULOCK(r_max); RPMI_ULOCK(r_min); return (ret); }