static int __seq_set_init(const struct lu_env *env, struct lu_server_seq *seq)
{
	struct lu_seq_range *space = &seq->lss_space;
	int rc;

	range_alloc(&seq->lss_lowater_set, space, seq->lss_set_width);
	range_alloc(&seq->lss_hiwater_set, space, seq->lss_set_width);

	rc = seq_store_update(env, seq, NULL, 1);

	return rc;
}
static int __seq_server_alloc_super(struct lu_server_seq *seq,
				    struct lu_seq_range *out,
				    const struct lu_env *env)
{
	struct lu_seq_range *space = &seq->lss_space;
	int rc;
	ENTRY;

	LASSERT(range_is_sane(space));

	if (range_is_exhausted(space)) {
		CERROR("%s: Sequences space is exhausted\n",
		       seq->lss_name);
		RETURN(-ENOSPC);
	} else {
		range_alloc(out, space, seq->lss_width);
	}

	rc = seq_store_update(env, seq, out, 1 /* sync */);

	LCONSOLE_INFO("%s: super-sequence allocation rc = %d " DRANGE"\n",
		      seq->lss_name, rc, PRANGE(out));

	RETURN(rc);
}
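/*
 * A minimal sketch of the range_alloc() semantics assumed by the callers
 * above: carve @width sequences off the front of @from into @to and advance
 * @from past them.  This is an illustration only; the helper name
 * range_alloc_sketch() is hypothetical and the in-tree range_alloc() may
 * differ in details (clamping, assertions).
 */
static inline void range_alloc_sketch(struct lu_seq_range *to,
				      struct lu_seq_range *from,
				      __u64 width)
{
	/* never hand out more than what is left in @from */
	width = min(lu_seq_range_space(from), width);

	/* @to gets the first @width sequences of @from */
	to->lsr_start = from->lsr_start;
	to->lsr_end = from->lsr_start + width;

	/* consume the carved-out piece from the front of @from */
	from->lsr_start += width;
}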
/*
 * This function implements the new seq allocation algorithm using async
 * updates to the seq file on disk. See bug 18857 for details.
 * Three variables keep track of this process:
 *
 * lss_space;       - available sequence space
 * lss_lowater_set; - lu_seq_range for all seqs before the barrier, i.e. safe
 *                    to use
 * lss_hiwater_set; - lu_seq_range after the barrier, i.e. allocated but
 *                    possibly not yet committed
 *
 * When lss_lowater_set reaches the end, it is replaced with the hiwater one
 * and a write operation is initiated to allocate a new hiwater range.
 * If the last seq write operation is still not committed, the current
 * operation is flagged as a sync write op.
 */
static int range_alloc_set(const struct lu_env *env,
			   struct lu_seq_range *out,
			   struct lu_server_seq *seq)
{
	struct lu_seq_range *space = &seq->lss_space;
	struct lu_seq_range *loset = &seq->lss_lowater_set;
	struct lu_seq_range *hiset = &seq->lss_hiwater_set;
	int rc = 0;

	if (lu_seq_range_is_zero(loset))
		__seq_set_init(env, seq);

	if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_ALLOC)) /* exhaust set */
		loset->lsr_start = loset->lsr_end;

	if (lu_seq_range_is_exhausted(loset)) {
		/* reached high water mark */
		struct lu_device *dev = seq->lss_site->ss_lu->ls_top_dev;
		int obd_num_clients = dev->ld_obd->obd_num_exports;
		__u64 set_sz;

		/* calculate new seq width based on number of clients */
		set_sz = max(seq->lss_set_width,
			     obd_num_clients * seq->lss_width);
		set_sz = min(lu_seq_range_space(space), set_sz);

		/* Switch to hiwater range now */
		*loset = *hiset;
		/* allocate new hiwater range */
		range_alloc(hiset, space, set_sz);

		/* update ondisk seq with new *space */
		rc = seq_store_update(env, seq, NULL, seq->lss_need_sync);
	}

	LASSERTF(!lu_seq_range_is_exhausted(loset) ||
		 lu_seq_range_is_sane(loset),
		 DRANGE"\n", PRANGE(loset));

	if (rc == 0)
		range_alloc(out, loset, seq->lss_width);

	RETURN(rc);
}
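/*
 * A worked trace of the low-/high-water scheme above; the numbers are
 * illustrative only and assume lss_set_width = 100, set_sz staying at
 * lss_set_width, and an initial on-disk space of [1000, 2000):
 *
 *   __seq_set_init():   lss_lowater_set = [1000, 1100),
 *                       lss_hiwater_set = [1100, 1200),
 *                       space = [1200, 2000) written back synchronously.
 *
 *   While lss_lowater_set still has room, range_alloc_set() hands out
 *   lss_width-sized pieces from it without touching the disk.
 *
 *   Once lss_lowater_set is exhausted:
 *                       lss_lowater_set = [1100, 1200)  (the old hiwater set),
 *                       lss_hiwater_set = [1200, 1300)  carved from space,
 *                       space = [1300, 2000) written back, synchronously only
 *                       if the previous write has not committed yet
 *                       (seq->lss_need_sync).
 */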