static int __seq_server_alloc_super(struct lu_server_seq *seq,
                                    struct lu_seq_range *out,
                                    const struct lu_env *env)
{
        struct lu_seq_range *space = &seq->lss_space;
        int rc;
        ENTRY;

        LASSERT(range_is_sane(space));

        if (range_is_exhausted(space)) {
                CERROR("%s: Sequences space is exhausted\n",
                       seq->lss_name);
                RETURN(-ENOSPC);
        } else {
                range_alloc(out, space, seq->lss_width);
        }

        rc = seq_store_update(env, seq, out, 1 /* sync */);

        LCONSOLE_INFO("%s: super-sequence allocation rc = %d " DRANGE"\n",
                      seq->lss_name, rc, PRANGE(out));

        RETURN(rc);
}
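/*
 * Illustrative sketch, not part of this file: range_alloc() is assumed
 * to carve @width sequences off the front of @space, roughly:
 *
 *      out->lsr_start = space->lsr_start;
 *      out->lsr_end   = space->lsr_start + width;
 *      space->lsr_start += width;
 *
 * so every super-sequence grant above shrinks lss_space from the left,
 * and the synchronous seq_store_update() makes the shrink durable
 * before the range is handed out.
 */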
int seq_server_alloc_spec(struct lu_server_seq *seq,
                          struct lu_seq_range *spec,
                          const struct lu_env *env)
{
        struct lu_seq_range *space = &seq->lss_space;
        int rc = -ENOSPC;
        ENTRY;

        /*
         * In some cases (like recovery after a disaster)
         * we may need to allocate sequences manually.
         * Note that some sequences can be lost if the requested
         * range doesn't start at the beginning of the current
         * free space. Also note that it's currently not possible
         * to allocate sequences out of natural order.
         */
        if (spec->lsr_start >= spec->lsr_end)
                RETURN(-EINVAL);
        if (spec->lsr_flags != LU_SEQ_RANGE_MDT &&
            spec->lsr_flags != LU_SEQ_RANGE_OST)
                RETURN(-EINVAL);

        mutex_lock(&seq->lss_mutex);
        if (spec->lsr_start >= space->lsr_start) {
                space->lsr_start = spec->lsr_end;
                rc = seq_store_update(env, seq, spec, 1 /* sync */);

                LCONSOLE_INFO("%s: "DRANGE" sequences allocated: rc = %d\n",
                              seq->lss_name, PRANGE(spec), rc);
        }
        mutex_unlock(&seq->lss_mutex);

        RETURN(rc);
}
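/*
 * Hedged usage sketch for seq_server_alloc_spec(); the range values
 * below are hypothetical. A recovery tool could request a specific
 * MDT range like this:
 *
 *      struct lu_seq_range spec = {
 *              .lsr_start = 0x400ULL,
 *              .lsr_end   = 0x1000ULL,
 *              .lsr_flags = LU_SEQ_RANGE_MDT,
 *      };
 *      rc = seq_server_alloc_spec(seq, &spec, env);
 *
 * Because the function sets space->lsr_start = spec->lsr_end, any
 * sequences between the old space start and spec->lsr_start are lost,
 * as the comment inside the function warns.
 */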
static int __seq_set_init(const struct lu_env *env, struct lu_server_seq *seq)
{
        struct lu_seq_range *space = &seq->lss_space;
        int rc;

        range_alloc(&seq->lss_lowater_set, space, seq->lss_set_width);
        range_alloc(&seq->lss_hiwater_set, space, seq->lss_set_width);

        rc = seq_store_update(env, seq, NULL, 1);

        return rc;
}
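/*
 * Sketch of the state after __seq_set_init(), assuming range_alloc()
 * carves from the front of the space (S = old space start,
 * W = lss_set_width):
 *
 *      lss_lowater_set = [S,     S + W)     <- allocations served here
 *      lss_hiwater_set = [S + W, S + 2W)    <- standby window
 *      lss_space now starts at S + 2W
 */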
/*
 * This function implements the new seq allocation algorithm using async
 * updates to the seq file on disk (see bug 18857 for details).
 * The following fields keep track of this process:
 *
 * lss_space       - available sequence space
 * lss_lowater_set - lu_seq_range for all seqs before the barrier, i.e.
 *                   safe to use
 * lss_hiwater_set - lu_seq_range after the barrier, i.e. allocated but
 *                   possibly not yet committed
 *
 * When lss_lowater_set reaches its end it is replaced with the hiwater
 * one and a write operation is initiated to allocate a new hiwater range.
 * If the last seq write operation is still not committed, the current
 * operation is flagged as a sync write op.
 */
static int range_alloc_set(const struct lu_env *env,
                           struct lu_seq_range *out,
                           struct lu_server_seq *seq)
{
        struct lu_seq_range *space = &seq->lss_space;
        struct lu_seq_range *loset = &seq->lss_lowater_set;
        struct lu_seq_range *hiset = &seq->lss_hiwater_set;
        int rc = 0;

        if (lu_seq_range_is_zero(loset))
                __seq_set_init(env, seq);

        if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_ALLOC)) /* exhaust set */
                loset->lsr_start = loset->lsr_end;

        if (lu_seq_range_is_exhausted(loset)) {
                /* reached the high water mark. */
                struct lu_device *dev = seq->lss_site->ss_lu->ls_top_dev;
                int obd_num_clients = dev->ld_obd->obd_num_exports;
                __u64 set_sz;

                /* calculate new seq width based on number of clients */
                set_sz = max(seq->lss_set_width,
                             obd_num_clients * seq->lss_width);
                set_sz = min(lu_seq_range_space(space), set_sz);

                /* Switch to hiwater range now */
                *loset = *hiset;
                /* Allocate a new hiwater range */
                range_alloc(hiset, space, set_sz);

                /* Update on-disk seq with new *space */
                rc = seq_store_update(env, seq, NULL, seq->lss_need_sync);
        }

        LASSERTF(!lu_seq_range_is_exhausted(loset) ||
                 lu_seq_range_is_sane(loset),
                 DRANGE"\n", PRANGE(loset));

        if (rc == 0)
                range_alloc(out, loset, seq->lss_width);

        RETURN(rc);
}
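/*
 * Worked example of the watermark scheme in range_alloc_set() above;
 * the widths are illustrative. With lss_set_width = 1024 and
 * lss_width = 1:
 *
 *      init:  loset = [0, 1024)     hiset = [1024, 2048)
 *             ...1024 allocations later loset is exhausted...
 *      swap:  loset = [1024, 2048)  hiset = [2048, 3072)
 *
 * Only the swap step calls seq_store_update(); the allocations in
 * between are served from the in-memory loset, which is what makes
 * the scheme asynchronous.
 */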
int seq_server_init(const struct lu_env *env,
                    struct lu_server_seq *seq,
                    struct dt_device *dev,
                    const char *prefix,
                    enum lu_mgr_type type,
                    struct seq_server_site *ss)
{
        int rc, is_srv = (type == LUSTRE_SEQ_SERVER);
        ENTRY;

        LASSERT(dev != NULL);
        LASSERT(prefix != NULL);
        LASSERT(ss != NULL);
        LASSERT(ss->ss_lu != NULL);

        /*
         * A compile-time check for FIDs that used to be in lustre_idl.h
         * but is moved here to remove CLASSERT/LASSERT in that header.
         * Check that all lu_fid fields are converted in fid_cpu_to_le()
         * and friends and that there is no padding added by the compiler
         * to the struct.
         */
        {
                struct lu_fid tst;

                CLASSERT(sizeof(tst) == sizeof(tst.f_seq) +
                         sizeof(tst.f_oid) + sizeof(tst.f_ver));
        }

        seq->lss_cli = NULL;
        seq->lss_type = type;
        seq->lss_site = ss;
        lu_seq_range_init(&seq->lss_space);

        lu_seq_range_init(&seq->lss_lowater_set);
        lu_seq_range_init(&seq->lss_hiwater_set);
        seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH;
        mutex_init(&seq->lss_mutex);

        seq->lss_width = is_srv ? LUSTRE_SEQ_META_WIDTH :
                                  LUSTRE_SEQ_SUPER_WIDTH;

        snprintf(seq->lss_name, sizeof(seq->lss_name),
                 "%s-%s", (is_srv ? "srv" : "ctl"), prefix);

        rc = seq_store_init(seq, env, dev);
        if (rc)
                GOTO(out, rc);

        /* Request backing store for saved sequence info. */
        rc = seq_store_read(seq, env);
        if (rc == -ENODATA) {
                /* Nothing was read, initialize with the default value. */
                seq->lss_space = is_srv ? LUSTRE_SEQ_ZERO_RANGE :
                                          LUSTRE_SEQ_SPACE_RANGE;

                seq->lss_space.lsr_index = ss->ss_node_id;
                LCONSOLE_INFO("%s: No data found on store. Initialize space\n",
                              seq->lss_name);

                rc = seq_store_update(env, seq, NULL, 0);
                if (rc) {
                        CERROR("%s: Can't write space data, rc %d\n",
                               seq->lss_name, rc);
                }
        } else if (rc) {
                CERROR("%s: Can't read space data, rc %d\n",
                       seq->lss_name, rc);
                GOTO(out, rc);
        }

        if (is_srv) {
                LASSERT(lu_seq_range_is_sane(&seq->lss_space));
        } else {
                LASSERT(!lu_seq_range_is_zero(&seq->lss_space) &&
                        lu_seq_range_is_sane(&seq->lss_space));
        }

        rc = seq_server_proc_init(seq);
        if (rc)
                GOTO(out, rc);

        EXIT;
out:
        if (rc)
                seq_server_fini(seq, env);
        return rc;
}
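/*
 * For reference, the compile-time check in seq_server_init() above
 * assumes the canonical lu_fid layout:
 *
 *      struct lu_fid {
 *              __u64 f_seq;    // sequence number
 *              __u32 f_oid;    // object id within the sequence
 *              __u32 f_ver;    // object version
 *      };
 *
 * i.e. sizeof(struct lu_fid) == 16 with no compiler padding, so
 * fid_cpu_to_le() and friends can safely convert each field in place.
 */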