/* * Handle a BMAPCHWRMODE request to upgrade a client bmap lease from * READ-only to READ+WRITE. * @rq: RPC request. */ int slm_rmc_handle_bmap_chwrmode(struct pscrpc_request *rq) { struct bmap_mds_lease *bml = NULL; struct srm_bmap_chwrmode_req *mq; struct srm_bmap_chwrmode_rep *mp; struct fidc_membh *f = NULL; struct bmapc_memb *b = NULL; struct bmap_mds_info *bmi; SL_RSX_ALLOCREP(rq, mq, mp); mp->rc = -slm_fcmh_get(&mq->sbd.sbd_fg, &f); if (mp->rc) PFL_GOTOERR(out, mp->rc); mp->rc = bmap_lookup(f, mq->sbd.sbd_bmapno, &b); if (mp->rc) PFL_GOTOERR(out, mp->rc); bmi = bmap_2_bmi(b); bml = mds_bmap_getbml(b, mq->sbd.sbd_seq, mq->sbd.sbd_nid, mq->sbd.sbd_pid); if (bml == NULL) PFL_GOTOERR(out, mp->rc = -EINVAL); mp->rc = mds_bmap_bml_chwrmode(bml, mq->prefios[0]); if (mp->rc == -PFLERR_ALREADY) mp->rc = 0; else if (mp->rc) PFL_GOTOERR(out, mp->rc); mp->sbd = mq->sbd; mp->sbd.sbd_seq = bml->bml_seq; mp->sbd.sbd_key = bmi->bmi_assign->odtr_crc; psc_assert(bmi->bmi_wr_ion); mp->sbd.sbd_ios = rmmi2resm(bmi->bmi_wr_ion)->resm_res_id; out: if (bml) mds_bmap_bml_release(bml); if (b) bmap_op_done(b); if (f) fcmh_op_done(f); return (0); }
/*
 * Get the specified bmap, loading it from backing store if necessary
 * and optionally switching it into the requested access mode.
 * @pci: caller information (for logging macros).
 * @f: fcmh.
 * @n: bmap number.
 * @rw: access mode (SL_READ, SL_WRITE, or 0 for a plain lookup).
 * @flags: retrieval parameters (BMAPGETF_*).
 * @bp: value-result bmap pointer.
 * Notes: returns the bmap referenced and locked.
 */
int
_bmap_get(const struct pfl_callerinfo *pci, struct fidc_membh *f,
    sl_bmapno_t n, enum rw rw, int flags, struct bmap **bp)
{
	int rc = 0, new_bmap, bmaprw = 0;
	struct bmap *b;

	if (bp)
		*bp = NULL;

	if (rw)
		bmaprw = rw == SL_WRITE ? BMAPF_WR : BMAPF_RD;

	/*
	 * Look the bmap up in (or, with BMAPGETF_CREATE, instantiate it
	 * into) the cache; on success it comes back locked.
	 */
	new_bmap = flags & BMAPGETF_CREATE;
	b = bmap_lookup_cache(f, n, bmaprw, &new_bmap);
	if (b == NULL) {
		rc = ENOENT;
		goto out;
	}

	/*
	 * Another thread is already fetching this bmap.  A nonblocking
	 * caller bails out immediately (rc = 0, bmap handed back while
	 * still LOADING); everyone else waits for the load to finish.
	 */
	if (flags & BMAPGETF_NONBLOCK) {
		if (b->bcm_flags & BMAPF_LOADING)
			goto out;
	} else
		bmap_wait_locked(b, b->bcm_flags & BMAPF_LOADING);

	if (b->bcm_flags & BMAPF_LOADED)
		goto loaded;

	if (flags & BMAPGETF_NORETRIEVE) {
		/*
		 * NOTE(review): BMAPF_LOADED was tested just above and
		 * would have jumped to "loaded", so the first branch
		 * below appears unreachable here; confirm whether the
		 * "bmap-already-loaded" counter can ever fire.
		 */
		if (b->bcm_flags & BMAPF_LOADED)
			OPSTAT_INCR("bmap-already-loaded");
		else
			OPSTAT_INCR("bmap-not-yet-loaded");
		goto out;
	}

	/* We are the loader; drop the lock across the slow fetch. */
	b->bcm_flags |= BMAPF_LOADING;
	DEBUG_BMAP(PLL_DIAG, b, "loading bmap; flags=%d", flags);
	BMAP_ULOCK(b);

	/* msl_bmap_retrieve(), iod_bmap_retrieve(), mds_bmap_read() */
	rc = sl_bmap_ops.bmo_retrievef(b, flags);

	BMAP_LOCK(b);
	if (flags & BMAPGETF_NONBLOCK) {
		/* Nonblocking: leave LOADING set on success. */
		if (rc)
			b->bcm_flags &= ~BMAPF_LOADING;
		goto out;
	}
	b->bcm_flags &= ~BMAPF_LOADING;
	if (!rc) {
		/* Mark loaded and wake any threads waiting above. */
		b->bcm_flags |= BMAPF_LOADED;
		bmap_wake_locked(b);
	}

 loaded:

	/*
	 * Early bail out should be safe.  There is only one place the
	 * client will do a bmap lookup.  And in that code path, we just
	 * add the DIO flag to the bmap.  See msrcm_handle_bmapdio().
	 */
	if (rc || !bmaprw)
		goto out;

	/*
	 * Others wishing to access this bmap in the same mode must wait
	 * until MODECHNG ops have completed.  If the desired mode is
	 * present then a thread may proceed without blocking here so
	 * long as it only accesses structures which pertain to its
	 * mode.
	 */
	if (flags & BMAPGETF_NONBLOCK) {
		if (b->bcm_flags & BMAPF_MODECHNG)
			goto out;
	} else
		bmap_wait_locked(b, b->bcm_flags & BMAPF_MODECHNG);

	/*
	 * Not all lookups are done with the intent of changing the bmap
	 * mode i.e. bmap_lookup() does not specify a rw value.
	 */
	if (!(bmaprw & b->bcm_flags) && sl_bmap_ops.bmo_mode_chngf) {

		psc_assert(!(b->bcm_flags & BMAPF_MODECHNG));
		b->bcm_flags |= BMAPF_MODECHNG;

		DEBUG_BMAP(PLL_DIAG, b, "mode change (rw=%d)", rw);

		/* Drop the lock across the (possibly remote) modeset. */
		BMAP_ULOCK(b);

		psc_assert(rw == SL_WRITE || rw == SL_READ);

		/* client only: call msl_bmap_modeset() */
		rc = sl_bmap_ops.bmo_mode_chngf(b, rw, flags);
		BMAP_LOCK(b);
	}

 out:
	if (b) {
		DEBUG_BMAP(rc && (rc != SLERR_BMAP_INVALID ||
		    (flags & BMAPGETF_NOAUTOINST) == 0) ?
		    PLL_ERROR : PLL_DIAG, b,
		    "grabbed rc=%d", rc);
		if (rc)
			/* Failure: drop our reference and the lock. */
			bmap_op_done(b);
		else
			/* Success: referenced and locked bmap out. */
			*bp = b;
	}
	return (rc);
}
/*
 * Handle a request to do replication from a client.  May also
 * reinitialize some parameters of the replication, such as priority, if
 * the request already exists in the system.
 * @fgp: FID + generation of the file to replicate.
 * @bmapno: first bmap number to process.
 * @nbmaps: value-result; on entry, the number of bmaps requested
 *	((sl_bmapno_t)-1 acts as a wildcard); on exit, the number of
 *	bmaps actually processed.
 * @iosv: replica I/O system(s) to replicate to.
 * @nios: number of entries in @iosv.
 * @sys_prio: system priority; -1 means leave unchanged.
 * @usr_prio: user priority; -1 means leave unchanged.
 */
int
mds_repl_addrq(const struct sl_fidgen *fgp, sl_bmapno_t bmapno,
    sl_bmapno_t *nbmaps, sl_replica_t *iosv, int nios, int sys_prio,
    int usr_prio)
{
	int tract[NBREPLST], ret_hasvalid[NBREPLST];
	int iosidx[SL_MAX_REPLICAS], rc, flags;
	sl_bmapno_t nbmaps_processed = 0;
	struct fidc_membh *f = NULL;
	struct bmap *b;

	/* Perform sanity checks on request. */
	if (nios < 1 || nios > SL_MAX_REPLICAS || *nbmaps == 0)
		return (-EINVAL);

	rc = slm_fcmh_get(fgp, &f);
	if (rc)
		return (-rc);

	/* Replication applies only to regular files and directories. */
	if (!fcmh_isdir(f) && !fcmh_isreg(f))
		PFL_GOTOERR(out, rc = -PFLERR_NOTSUP);

	/* Lookup replica(s)' indexes in our replica table. */
	rc = -mds_repl_iosv_lookup_add(current_vfsid, fcmh_2_inoh(f),
	    iosv, iosidx, nios);
	if (rc)
		PFL_GOTOERR(out, rc);

	/*
	 * If we are modifying a directory, we are done as just the
	 * replica table needs to be updated.
	 */
	if (fcmh_isdir(f))
		PFL_GOTOERR(out, 0);

	/*
	 * Setup structure to ensure at least one VALID replica exists.
	 */
	brepls_init(ret_hasvalid, 0);
	ret_hasvalid[BREPLST_VALID] = 1;

	/*
	 * Setup transitions to enqueue a replication.
	 */
	brepls_init(tract, -1);
	tract[BREPLST_INVALID] = BREPLST_REPL_QUEUED;
	tract[BREPLST_GARBAGE_SCHED] = BREPLST_REPL_QUEUED;
	tract[BREPLST_GARBAGE_QUEUED] = BREPLST_REPL_QUEUED;

	/*
	 * Wildcards shouldn't result in errors on zero-length files:
	 * preset an error only for explicit ranges, so a start bmapno
	 * past EOF (loop never entered) is reported as invalid; the
	 * first bmap_get() below overwrites this preset.
	 */
	if (*nbmaps != (sl_bmapno_t)-1)
		rc = -SLERR_BMAP_INVALID;

	for (; *nbmaps && bmapno < fcmh_nvalidbmaps(f);
	    bmapno++, --*nbmaps, nbmaps_processed++) {

		/* Cap the amount of work done in one request. */
		if (nbmaps_processed >= SLM_REPLRQ_NBMAPS_MAX) {
			rc = -PFLERR_WOULDBLOCK;
			break;
		}

		rc = -bmap_get(f, bmapno, SL_WRITE, &b);
		if (rc)
			PFL_GOTOERR(out, rc);

		/*
		 * If no VALID replicas exist, the bmap must be
		 * uninitialized/all zeroes; skip it.
		 */
		if (mds_repl_bmap_walk_all(b, NULL, ret_hasvalid,
		    REPL_WALKF_SCIRCUIT) == 0) {
			bmap_op_done(b);
			continue;
		}

		/*
		 * We do not follow the standard "retifset" API here
		 * because we need to preserve DIRTY if it gets set
		 * instead of some other state getting returned.
		 */
		flags = 0;
		_mds_repl_bmap_walk(b, tract, NULL, 0, iosidx, nios,
		    slm_repl_addrq_cb, &flags);

		/* both default to -1 in parse_replrq() */
		bmap_2_bmi(b)->bmi_sys_prio = sys_prio;
		bmap_2_bmi(b)->bmi_usr_prio = usr_prio;
		if (flags & FLAG_DIRTY)
			/* Replica state changed: log it. */
			mds_bmap_write_logrepls(b);
		else if (sys_prio != -1 || usr_prio != -1)
			/* Only priorities changed: push an update. */
			slm_repl_upd_write(b, 0);
		bmap_op_done_type(b, BMAP_OPCNT_LOOKUP);

		if (flags & FLAG_REPLICA_STATE_INVALID) {
			/* See pfl_register_errno() */
			rc = -SLERR_REPLICA_STATE_INVALID;
			break;
		}
	}

 out:
	if (f)
		fcmh_op_done(f);
	/* Report how many bmaps we actually touched. */
	*nbmaps = nbmaps_processed;
	return (rc);
}
/*
 * Rewrite an inode, its extras, and all of its bmaps to backing store,
 * optionally converting each bmap from an old on-disk format via @sic.
 * @vfsid: virtual file system ID.
 * @sic: compatibility handler for old-format bmaps; when non-NULL,
 *	bmaps are fetched with BMAPGETF_NORETRIEVE and converted with
 *	sic_read_bmap() instead of being loaded normally.
 * @ih: inode handle to dump.
 * @readh: mdsio handle to read old-format data through.
 * Returns 0 on success or an error code from the first failing step.
 */
int
mds_inode_dump(int vfsid, struct sl_ino_compat *sic,
    struct slash_inode_handle *ih, void *readh)
{
	struct fidc_membh *f;
	struct bmapc_memb *b;
	struct mio_fh *fh;
	sl_bmapno_t i;
	int rc, fl;
	void *th;

	f = inoh_2_fcmh(ih);
	/*
	 * NOTE(review): th and fh are both taken from inoh_2_mfh(ih),
	 * so the "restore" below (fh->fh = th) stores the handle
	 * structure into its own fh field rather than putting back the
	 * previous fh->fh value.  Verify against the inoh_2_mfh()
	 * definition whether th was meant to save the old fh->fh (or
	 * fh to come from a different accessor).
	 */
	th = inoh_2_mfh(ih);
	fh = inoh_2_mfh(ih);

	fl = BMAPGETF_CREATE | BMAPGETF_NOAUTOINST;
	if (sic)
		/* Compat path: skip the normal bmap retrieval. */
		fl |= BMAPGETF_NORETRIEVE;

	for (i = 0; ; i++) {
		/* Temporarily redirect reads through @readh. */
		fh->fh = readh;
		rc = bmap_getf(f, i, SL_WRITE, fl, &b);
		fh->fh = th;
		if (rc == SLERR_BMAP_INVALID) {
			/* Walked past the last bmap: normal stop. */
			(void)INOH_RLOCK(ih);
			break;
		}
		if (rc)
			return (rc);

		if (sic) {
			/* Convert the bmap from the old disk format. */
			rc = sic->sic_read_bmap(b, readh);
			if (rc) {
				bmap_op_done(b);
				(void)INOH_RLOCK(ih);
				if (rc == SLERR_BMAP_INVALID)
					break;
				return (rc);
			}
		}

		/* Rewrite the bmap in the current format. */
		rc = mds_bmap_write(b, NULL, NULL);
		bmap_op_done(b);

		/* Reassert the inode lock (result discarded). */
		(void)INOH_RLOCK(ih);
		if (rc)
			return (rc);
	}

	/* Flush the inode extras, then the inode itself. */
	rc = mds_inox_write(vfsid, ih, NULL, NULL);
	if (rc)
		return (rc);

	rc = mds_inode_write(vfsid, ih, NULL, NULL);
	if (rc)
		return (rc);

	/* Sync result is intentionally ignored here — TODO confirm. */
	mdsio_fsync(vfsid, &rootcreds, 1, th);
	return (0);
}