/* Start / stop recording */ static int mdd_changelog_on(const struct lu_env *env, struct mdd_device *mdd, int on) { int rc = 0; if ((on == 1) && ((mdd->mdd_cl.mc_flags & CLM_ON) == 0)) { LCONSOLE_INFO("%s: changelog on\n", mdd2obd_dev(mdd)->obd_name); if (mdd->mdd_cl.mc_flags & CLM_ERR) { CERROR("Changelogs cannot be enabled due to error " "condition (see %s log).\n", mdd2obd_dev(mdd)->obd_name); rc = -ESRCH; } else { spin_lock(&mdd->mdd_cl.mc_lock); mdd->mdd_cl.mc_flags |= CLM_ON; spin_unlock(&mdd->mdd_cl.mc_lock); rc = mdd_changelog_write_header(env, mdd, CLM_START); } } else if ((on == 0) && ((mdd->mdd_cl.mc_flags & CLM_ON) == CLM_ON)) { LCONSOLE_INFO("%s: changelog off\n",mdd2obd_dev(mdd)->obd_name); rc = mdd_changelog_write_header(env, mdd, CLM_FINI); spin_lock(&mdd->mdd_cl.mc_lock); mdd->mdd_cl.mc_flags &= ~CLM_ON; spin_unlock(&mdd->mdd_cl.mc_lock); } return rc; }
int kgnilnd_tunables_init() { int rc = 0; #if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM kgnilnd_tunables.kgn_sysctl = cfs_register_sysctl_table(kgnilnd_top_ctl_table, 0); if (kgnilnd_tunables.kgn_sysctl == NULL) CWARN("Can't setup /proc tunables\n"); #endif switch (*kgnilnd_tunables.kgn_checksum) { default: CERROR("Invalid checksum module parameter: %d\n", *kgnilnd_tunables.kgn_checksum); rc = -EINVAL; GOTO(out, rc); case GNILND_CHECKSUM_OFF: /* no checksumming */ break; case GNILND_CHECKSUM_SMSG_HEADER: LCONSOLE_INFO("SMSG header only checksumming enabled\n"); break; case GNILND_CHECKSUM_SMSG: LCONSOLE_INFO("SMSG checksumming enabled\n"); break; case GNILND_CHECKSUM_SMSG_BTE: LCONSOLE_INFO("SMSG + BTE checksumming enabled\n"); break; } if (*kgnilnd_tunables.kgn_max_immediate > GNILND_MAX_IMMEDIATE) { LCONSOLE_ERROR("kgnilnd module parameter 'max_immediate' too large %d > %d\n", *kgnilnd_tunables.kgn_max_immediate, GNILND_MAX_IMMEDIATE); rc = -EINVAL; GOTO(out, rc); } if (*kgnilnd_tunables.kgn_mbox_per_block < 1) { *kgnilnd_tunables.kgn_mbox_per_block = 1; } if (*kgnilnd_tunables.kgn_concurrent_sends == 0) { *kgnilnd_tunables.kgn_concurrent_sends = *kgnilnd_tunables.kgn_peer_credits; } else if (*kgnilnd_tunables.kgn_concurrent_sends > *kgnilnd_tunables.kgn_peer_credits) { LCONSOLE_ERROR("kgnilnd parameter 'concurrent_sends' too large: %d > %d (peer_credits)\n", *kgnilnd_tunables.kgn_concurrent_sends, *kgnilnd_tunables.kgn_peer_credits); rc = -EINVAL; } out: return rc; }
static int __seq_server_alloc_super(struct lu_server_seq *seq, struct lu_seq_range *out, const struct lu_env *env) { struct lu_seq_range *space = &seq->lss_space; int rc; ENTRY; LASSERT(range_is_sane(space)); if (range_is_exhausted(space)) { CERROR("%s: Sequences space is exhausted\n", seq->lss_name); RETURN(-ENOSPC); } else { range_alloc(out, space, seq->lss_width); } rc = seq_store_update(env, seq, out, 1 /* sync */); LCONSOLE_INFO("%s: super-sequence allocation rc = %d " DRANGE"\n", seq->lss_name, rc, PRANGE(out)); RETURN(rc); }
/* sysctl handler: on a successful write, propagate a changed RDMAQ
 * override (given in mbytes/sec) into kgnilnd_data in bytes. */
static int proc_toggle_rdmaq_override(struct ctl_table *table, int write,
				      void __user *buffer, size_t *lenp,
				      loff_t *ppos)
{
	int prev = kgnilnd_sysctl.ksd_rdmaq_override;
	int rc = 0;
	ENTRY;

	rc = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!write) {
		/* reads need no post-processing */
		RETURN(rc);
	}

	/* only meaningful once the module is fully initialized */
	if (kgnilnd_data.kgn_init != GNILND_INIT_ALL)
		RETURN(-EINVAL);

	if (prev != kgnilnd_sysctl.ksd_rdmaq_override) {
		long bytes = kgnilnd_sysctl.ksd_rdmaq_override *
			     (long)(1024 * 1024);

		LCONSOLE_INFO("changing RDMAQ override to %d mbytes/sec\n",
			      kgnilnd_sysctl.ksd_rdmaq_override);
		/* the proc knob is in mbytes, internal accounting in bytes */
		kgnilnd_data.kgn_rdmaq_override = bytes;
		smp_wmb();
	}
	RETURN(rc);
}
int seq_server_alloc_spec(struct lu_server_seq *seq, struct lu_seq_range *spec, const struct lu_env *env) { struct lu_seq_range *space = &seq->lss_space; int rc = -ENOSPC; ENTRY; /* * In some cases (like recovery after a disaster) * we may need to allocate sequences manually * Notice some sequences can be lost if requested * range doesn't start at the beginning of current * free space. Also notice it's not possible now * to allocate sequences out of natural order. */ if (spec->lsr_start >= spec->lsr_end) RETURN(-EINVAL); if (spec->lsr_flags != LU_SEQ_RANGE_MDT && spec->lsr_flags != LU_SEQ_RANGE_OST) RETURN(-EINVAL); mutex_lock(&seq->lss_mutex); if (spec->lsr_start >= space->lsr_start) { space->lsr_start = spec->lsr_end; rc = seq_store_update(env, seq, spec, 1 /* sync */); LCONSOLE_INFO("%s: "DRANGE" sequences allocated: rc = %d \n", seq->lss_name, PRANGE(spec), rc); } mutex_unlock(&seq->lss_mutex); RETURN(rc); }
static int mdd_fill_fldb(const struct lu_env *env, struct mdd_device *mdd) { struct seq_server_site *ss = mdd_seq_site(mdd); struct lu_seq_range range; int rc; LASSERT(ss->ss_server_seq != NULL); LASSERT(ss->ss_server_fld != NULL); if (ss->ss_server_seq->lss_space.lsr_end == 0) return 0; memcpy(&range, &ss->ss_server_seq->lss_space, sizeof(range)); /* Pre-existing ZFS does not insert any entries to FLDB, we need * to insert it to FLDB during convertion */ range.lsr_start = FID_SEQ_NORMAL; fld_range_set_mdt(&range); mutex_lock(&ss->ss_server_fld->lsf_lock); rc = fld_insert_entry(env, ss->ss_server_fld, &range); mutex_unlock(&ss->ss_server_fld->lsf_lock); LCONSOLE_INFO("%s: insert missing range "DRANGE"\n", mdd2obd_dev(mdd)->obd_name, PRANGE(&range)); return rc; }
static int LL_PROC_PROTO(proc_toggle_rdmaq_override) { int old_val = kgnilnd_sysctl.ksd_rdmaq_override; int rc = 0; ENTRY; rc = ll_proc_dointvec(table, write, filp, buffer, lenp, ppos); if (!write) { /* read */ RETURN(rc); } if (kgnilnd_data.kgn_init != GNILND_INIT_ALL) { rc = -EINVAL; RETURN(rc); } if (old_val != kgnilnd_sysctl.ksd_rdmaq_override) { long new_mb = kgnilnd_sysctl.ksd_rdmaq_override * (long)(1024*1024); LCONSOLE_INFO("changing RDMAQ override to %d mbytes/sec\n", kgnilnd_sysctl.ksd_rdmaq_override); /* override proc is mbytes, but we calc in bytes */ kgnilnd_data.kgn_rdmaq_override = new_mb; smp_wmb(); } RETURN(rc); }
/* proc write handler: one-way switch enabling "OST-index in IDIF".
 * Once enabled (rocompat feature flag persisted in the target server
 * data) the setting cannot be turned off again. */
static ssize_t
ldiskfs_osd_index_in_idif_seq_write(struct file *file,
				    const char __user *buffer,
				    size_t count, loff_t *off)
{
	struct lu_env env;
	struct seq_file *seq = file->private_data;
	struct dt_device *dt_dev = seq->private;
	struct osd_device *dev = osd_dt_dev(dt_dev);
	struct lu_target *tgt;
	__s64 val;
	int rc;

	LASSERT(dev != NULL);

	/* device not mounted yet */
	if (unlikely(dev->od_mnt == NULL))
		return -EINPROGRESS;

	rc = lprocfs_str_to_s64(buffer, count, &val);
	if (rc != 0)
		return rc;

	if (dev->od_index_in_idif) {
		/* already on: writing non-zero again is a harmless no-op */
		if (val != 0)
			return count;

		/* attempts to disable are rejected — one-way switch */
		LCONSOLE_WARN("%s: OST-index in IDIF has been enabled, "
			      "it cannot be reverted back.\n", osd_name(dev));
		return -EPERM;
	}

	/* writing zero while the feature is off changes nothing */
	if (val == 0)
		return count;

	rc = lu_env_init(&env, LCT_DT_THREAD);
	if (rc != 0)
		return rc;

	/* record the feature in the target's server data and persist it */
	tgt = dev->od_dt_dev.dd_lu_dev.ld_site->ls_tgt;
	tgt->lut_lsd.lsd_feature_rocompat |= OBD_ROCOMPAT_IDX_IN_IDIF;
	rc = tgt_server_data_update(&env, tgt, 1);
	lu_env_fini(&env);
	if (rc < 0)
		return rc;

	LCONSOLE_INFO("%s: enable OST-index in IDIF successfully, "
		      "it cannot be reverted back.\n", osd_name(dev));

	dev->od_index_in_idif = 1;
	return count;
}
/**
 * Performs cleanup procedures for passed \a obd given it is mgs obd.
 * Tears down the MGS filesystem state, mount reference, LDLM namespace
 * and fsfilt ops.  A no-op when nothing was mounted.
 */
static int mgs_cleanup(struct obd_device *obd)
{
	struct mgs_obd *mgs_dev = &obd->u.mgs;
	ENTRY;

	/* nothing mounted — nothing to tear down */
	if (mgs_dev->mgs_sb == NULL)
		RETURN(0);

	mgs_fs_cleanup(obd);

	server_put_mount(obd->obd_name, mgs_dev->mgs_vfsmnt);
	mgs_dev->mgs_sb = NULL;

	ldlm_namespace_free(obd->obd_namespace, NULL, 1);
	obd->obd_namespace = NULL;

	fsfilt_put_ops(obd->obd_fsops);

	LCONSOLE_INFO("%s has stopped.\n", obd->obd_name);

	RETURN(0);
}
int osp_precreate_rollover_new_seq(struct lu_env *env, struct osp_device *osp) { struct lu_fid *fid = &osp_env_info(env)->osi_fid; struct lu_fid *last_fid = &osp->opd_last_used_fid; int rc; ENTRY; rc = seq_client_get_seq(env, osp->opd_obd->u.cli.cl_seq, &fid->f_seq); if (rc != 0) { CERROR("%s: alloc fid error: rc = %d\n", osp->opd_obd->obd_name, rc); RETURN(rc); } fid->f_oid = 1; fid->f_ver = 0; LASSERTF(fid_seq(fid) != fid_seq(last_fid), "fid "DFID", last_fid "DFID"\n", PFID(fid), PFID(last_fid)); rc = osp_write_last_oid_seq_files(env, osp, fid, 1); if (rc != 0) { CERROR("%s: Can not update oid/seq file: rc = %d\n", osp->opd_obd->obd_name, rc); RETURN(rc); } LCONSOLE_INFO("%s: update sequence from "LPX64" to "LPX64"\n", osp->opd_obd->obd_name, fid_seq(last_fid), fid_seq(fid)); /* Update last_xxx to the new seq */ spin_lock(&osp->opd_pre_lock); osp->opd_last_used_fid = *fid; osp->opd_gap_start_fid = *fid; osp->opd_pre_used_fid = *fid; osp->opd_pre_last_created_fid = *fid; spin_unlock(&osp->opd_pre_lock); RETURN(rc); }
int seq_server_init(struct lu_server_seq *seq, struct dt_device *dev, const char *prefix, enum lu_mgr_type type, struct seq_server_site *ss, const struct lu_env *env) { int rc, is_srv = (type == LUSTRE_SEQ_SERVER); ENTRY; LASSERT(dev != NULL); LASSERT(prefix != NULL); LASSERT(ss != NULL); LASSERT(ss->ss_lu != NULL); seq->lss_cli = NULL; seq->lss_type = type; seq->lss_site = ss; range_init(&seq->lss_space); range_init(&seq->lss_lowater_set); range_init(&seq->lss_hiwater_set); seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH; mutex_init(&seq->lss_mutex); seq->lss_width = is_srv ? LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH; snprintf(seq->lss_name, sizeof(seq->lss_name), "%s-%s", (is_srv ? "srv" : "ctl"), prefix); rc = seq_store_init(seq, env, dev); if (rc) GOTO(out, rc); /* Request backing store for saved sequence info. */ rc = seq_store_read(seq, env); if (rc == -ENODATA) { /* Nothing is read, init by default value. */ seq->lss_space = is_srv ? LUSTRE_SEQ_ZERO_RANGE: LUSTRE_SEQ_SPACE_RANGE; LASSERT(ss != NULL); seq->lss_space.lsr_index = ss->ss_node_id; LCONSOLE_INFO("%s: No data found " "on store. Initialize space\n", seq->lss_name); rc = seq_store_update(env, seq, NULL, 0); if (rc) { CERROR("%s: Can't write space data, " "rc %d\n", seq->lss_name, rc); } } else if (rc) { CERROR("%s: Can't read space data, rc %d\n", seq->lss_name, rc); GOTO(out, rc); } if (is_srv) { LASSERT(range_is_sane(&seq->lss_space)); } else { LASSERT(!range_is_zero(&seq->lss_space) && range_is_sane(&seq->lss_space)); } rc = seq_server_proc_init(seq); if (rc) GOTO(out, rc); EXIT; out: if (rc) seq_server_fini(seq, env); return rc; }
/*
 * Common path for getxattr/listxattr on a Lustre client inode.
 *
 * With name == NULL this services listxattr (xattr_type is set to
 * XATTR_OTHER_T and the MDS is asked for the full list).  Otherwise the
 * named attribute is fetched, after type filtering and several special
 * cases (security.capability, security.selinux, POSIX ACLs — including
 * the remote-ACL client mode guarded by LL_SBI_RMT_CLIENT).
 *
 * Returns the attribute size (or the data copied into @buffer) on
 * success, negative errno on failure.  With size == 0 only the size of
 * the attribute is detected and returned.
 */
static int ll_getxattr_common(struct inode *inode, const char *name,
			      void *buffer, size_t size, __u64 valid)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	struct mdt_body *body;
	int xattr_type, rc;
	void *xdata;
	struct obd_capa *oc;
	struct rmtacl_ctl_entry *rce = NULL;

	CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
	       inode->i_ino, inode->i_generation, inode);

	/* listxattr have slightly different behavior from of ext3:
	 * without 'user_xattr' ext3 will list all xattr names but
	 * filtered out "^user..*"; we list them all for simplicity. */
	if (!name) {
		xattr_type = XATTR_OTHER_T;
		goto do_getxattr;
	}

	xattr_type = get_xattr_type(name);
	rc = xattr_type_filter(sbi, xattr_type);
	if (rc)
		return rc;

	/* b15587: ignore security.capability xattr for now */
	if ((xattr_type == XATTR_SECURITY_T &&
	     strcmp(name, "security.capability") == 0))
		return -ENODATA;

	/* LU-549: Disable security.selinux when selinux is disabled */
	if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
	    strcmp(name, "security.selinux") == 0)
		return -EOPNOTSUPP;

#ifdef CONFIG_FS_POSIX_ACL
	/* remote-ACL client: only a process registered for one of the
	 * remote facl operations may read ACL xattrs */
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
	    (xattr_type == XATTR_ACL_ACCESS_T ||
	    xattr_type == XATTR_ACL_DEFAULT_T)) {
		rce = rct_search(&sbi->ll_rct, current_pid());
		if (rce == NULL ||
		    (rce->rce_ops != RMT_LSETFACL &&
		    rce->rce_ops != RMT_LGETFACL &&
		    rce->rce_ops != RMT_RSETFACL &&
		    rce->rce_ops != RMT_RGETFACL))
			return -EOPNOTSUPP;
	}

	/* posix acl is under protection of LOOKUP lock. when calling to this,
	 * we just have path resolution to the target inode, so we have great
	 * chance that cached ACL is uptodate.
	 */
	if (xattr_type == XATTR_ACL_ACCESS_T &&
	    !(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
		struct ll_inode_info *lli = ll_i2info(inode);
		struct posix_acl *acl;

		/* take a reference to the cached ACL under lli_lock */
		spin_lock(&lli->lli_lock);
		acl = posix_acl_dup(lli->lli_posix_acl);
		spin_unlock(&lli->lli_lock);

		if (!acl)
			return -ENODATA;

		rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
		posix_acl_release(acl);
		return rc;
	}
	if (xattr_type == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
		return -ENODATA;
#endif

do_getxattr:
	if (sbi->ll_xattr_cache_enabled && (rce == NULL ||
					    rce->rce_ops == RMT_LGETFACL ||
					    rce->rce_ops == RMT_LSETFACL)) {
		/* served from the client-side xattr cache */
		rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
		if (rc < 0)
			GOTO(out_xattr, rc);
	} else {
		/* fetch from the MDS */
		oc = ll_mdscapa_get(inode);
		rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
				valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
				name, NULL, 0, size, 0, &req);
		capa_put(oc);

		if (rc < 0)
			GOTO(out_xattr, rc);

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		LASSERT(body);

		/* only detect the xattr size */
		if (size == 0)
			GOTO(out, rc = body->eadatasize);

		if (size < body->eadatasize) {
			CERROR("server bug: replied size %u > %u\n",
				body->eadatasize, (int)size);
			GOTO(out, rc = -ERANGE);
		}

		if (body->eadatasize == 0)
			GOTO(out, rc = -ENODATA);

		/* do not need swab xattr data */
		xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
							body->eadatasize);
		if (!xdata)
			GOTO(out, rc = -EFAULT);

		memcpy(buffer, xdata, body->eadatasize);
		rc = body->eadatasize;
	}

#ifdef CONFIG_FS_POSIX_ACL
	/* for a pending remote lsetfacl, stash the fetched ACL in
	 * extended form so the follow-up set can merge against it */
	if (rce && rce->rce_ops == RMT_LSETFACL) {
		ext_acl_xattr_header *acl;

		acl = lustre_posix_acl_xattr_2ext(
					(posix_acl_xattr_header *)buffer, rc);
		if (IS_ERR(acl))
			GOTO(out, rc = PTR_ERR(acl));

		rc = ee_add(&sbi->ll_et, current_pid(), ll_inode2fid(inode),
			    xattr_type, acl);
		if (unlikely(rc < 0)) {
			lustre_ext_acl_xattr_free(acl);
			GOTO(out, rc);
		}
	}
#endif

out_xattr:
	/* server without user_xattr support: disable the feature
	 * client-side so we stop asking */
	if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
		LCONSOLE_INFO("%s: disabling user_xattr feature because it is not supported on the server: rc = %d\n",
			      ll_get_fsname(inode->i_sb, NULL, 0), rc);
		sbi->ll_flags &= ~LL_SBI_USER_XATTR;
	}
out:
	/* req is NULL on the cache path; finishing NULL is a no-op */
	ptlrpc_req_finished(req);
	return rc;
}
/*
 * Common path for setxattr/removexattr on a Lustre client inode.
 *
 * Filters the attribute by type, ignores a few special names
 * (trusted.lov / lustre.lov, security.capability), handles the
 * remote-ACL client transformations, then updates either the
 * client-side xattr cache or the MDS.
 *
 * Fixes versus the previous revision:
 *  - lustre_posix_acl_xattr_filter() returns a signed int, but its
 *    result was stored straight into the unsigned size_t 'size', so
 *    the 'size < 0' error check could never fire.  A signed
 *    intermediate is used now.
 *  - the ptlrpc request was leaked when md_setxattr() failed; the
 *    request is now finished on every path (finishing NULL is a no-op).
 *
 * Returns 0 on success, negative errno on failure.
 */
static
int ll_setxattr_common(struct inode *inode, const char *name,
		       const void *value, size_t size,
		       int flags, __u64 valid)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	int xattr_type, rc;
	struct obd_capa *oc;
	struct rmtacl_ctl_entry *rce = NULL;
#ifdef CONFIG_FS_POSIX_ACL
	posix_acl_xattr_header *new_value = NULL;
	ext_acl_xattr_header *acl = NULL;
#endif
	const char *pv = value;

	xattr_type = get_xattr_type(name);
	rc = xattr_type_filter(sbi, xattr_type);
	if (rc)
		return rc;

	/* b10667: ignore lustre special xattr for now */
	if ((xattr_type == XATTR_TRUSTED_T &&
	     strcmp(name, "trusted.lov") == 0) ||
	    (xattr_type == XATTR_LUSTRE_T &&
	     strcmp(name, "lustre.lov") == 0))
		return 0;

	/* b15587: ignore security.capability xattr for now */
	if ((xattr_type == XATTR_SECURITY_T &&
	     strcmp(name, "security.capability") == 0))
		return 0;

	/* LU-549: Disable security.selinux when selinux is disabled */
	if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
	    strcmp(name, "security.selinux") == 0)
		return -EOPNOTSUPP;

#ifdef CONFIG_FS_POSIX_ACL
	/* remote-ACL client: transform the incoming ACL depending on
	 * which remote facl operation the caller registered for */
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
	    (xattr_type == XATTR_ACL_ACCESS_T ||
	    xattr_type == XATTR_ACL_DEFAULT_T)) {
		rce = rct_search(&sbi->ll_rct, current_pid());
		if (rce == NULL ||
		    (rce->rce_ops != RMT_LSETFACL &&
		    rce->rce_ops != RMT_RSETFACL))
			return -EOPNOTSUPP;

		if (rce->rce_ops == RMT_LSETFACL) {
			struct eacl_entry *ee;

			/* consume the ACL stashed by the preceding
			 * lgetfacl and merge the new value into it */
			ee = et_search_del(&sbi->ll_et, current_pid(),
					   ll_inode2fid(inode), xattr_type);
			LASSERT(ee != NULL);
			if (valid & OBD_MD_FLXATTR) {
				acl = lustre_acl_xattr_merge2ext(
					(posix_acl_xattr_header *)value,
					size, ee->ee_acl);
				if (IS_ERR(acl)) {
					ee_free(ee);
					return PTR_ERR(acl);
				}
				size = CFS_ACL_XATTR_SIZE(
						le32_to_cpu(acl->a_count),
						ext_acl_xattr);
				pv = (const char *)acl;
			}
			ee_free(ee);
		} else if (rce->rce_ops == RMT_RSETFACL) {
			/* the filter returns a signed size or a negative
			 * errno; storing it directly into the unsigned
			 * 'size' made the error check always false */
			int new_size = lustre_posix_acl_xattr_filter(
						(posix_acl_xattr_header *)value,
						size, &new_value);
			if (unlikely(new_size < 0))
				return new_size;
			size = new_size;
			pv = (const char *)new_value;
		} else {
			return -EOPNOTSUPP;
		}

		valid |= rce_ops2valid(rce->rce_ops);
	}
#endif
	if (sbi->ll_xattr_cache_enabled &&
	    (rce == NULL || rce->rce_ops == RMT_LSETFACL)) {
		rc = ll_xattr_cache_update(inode, name, pv, size,
					   valid, flags);
	} else {
		oc = ll_mdscapa_get(inode);
		rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
				valid, name, pv, size, 0, flags,
				ll_i2suppgid(inode), &req);
		capa_put(oc);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (new_value != NULL)
		lustre_posix_acl_xattr_free(new_value, size);
	if (acl != NULL)
		lustre_ext_acl_xattr_free(acl);
#endif

	/* finish the request on all paths so it is not leaked when the
	 * server returned an error; ptlrpc_req_finished(NULL) is a no-op */
	ptlrpc_req_finished(req);

	if (rc) {
		if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
			LCONSOLE_INFO("Disabling user_xattr feature because "
				      "it is not supported on the server\n");
			sbi->ll_flags &= ~LL_SBI_USER_XATTR;
		}
		return rc;
	}

	return 0;
}
/*
 * Precreation worker thread, one per OSP device.
 *
 * Outer loop: (re)establish a usable connection — wait for a new
 * connection, initialize the client sequence export on first use, refresh
 * statfs, and clean up orphans.  Inner loop: while connected, top up the
 * pool of precreated objects, refresh statfs on demand, and roll over to
 * a new sequence when the current one is used up.  Runs until
 * osp_precreate_running() goes false; signals start/stop through
 * thread->t_flags and t_ctl_waitq.
 */
static int osp_precreate_thread(void *_arg)
{
	struct osp_device *d = _arg;
	struct ptlrpc_thread *thread = &d->opd_pre_thread;
	struct l_wait_info lwi = { 0 };
	struct lu_env env;
	int rc;
	ENTRY;

	rc = lu_env_init(&env, d->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
	if (rc) {
		CERROR("%s: init env error: rc = %d\n",
		       d->opd_obd->obd_name, rc);
		RETURN(rc);
	}

	/* announce that the thread is up */
	spin_lock(&d->opd_pre_lock);
	thread->t_flags = SVC_RUNNING;
	spin_unlock(&d->opd_pre_lock);
	wake_up(&thread->t_ctl_waitq);

	while (osp_precreate_running(d)) {
		/*
		 * need to be connected to OST
		 */
		while (osp_precreate_running(d)) {
			l_wait_event(d->opd_pre_waitq,
				     !osp_precreate_running(d) ||
				     d->opd_new_connection,
				     &lwi);

			if (!d->opd_new_connection)
				continue;

			/* fresh connection: clear the flags and proceed */
			d->opd_new_connection = 0;
			d->opd_got_disconnected = 0;
			break;
		}

		if (!osp_precreate_running(d))
			break;

		LASSERT(d->opd_obd->u.cli.cl_seq != NULL);
		if (d->opd_obd->u.cli.cl_seq->lcs_exp == NULL) {
			/* Get new sequence for client first */
			LASSERT(d->opd_exp != NULL);
			d->opd_obd->u.cli.cl_seq->lcs_exp =
				class_export_get(d->opd_exp);
			rc = osp_init_pre_fid(d);
			if (rc != 0) {
				/* undo the export reference and retry on
				 * the next connection event */
				class_export_put(d->opd_exp);
				d->opd_obd->u.cli.cl_seq->lcs_exp = NULL;
				CERROR("%s: init pre fid error: rc = %d\n",
				       d->opd_obd->obd_name, rc);
				continue;
			}
		}

		osp_statfs_update(d);

		/*
		 * Clean up orphans or recreate missing objects.
		 */
		rc = osp_precreate_cleanup_orphans(&env, d);
		if (rc != 0)
			continue;

		/*
		 * connected, can handle precreates now
		 */
		while (osp_precreate_running(d)) {
			l_wait_event(d->opd_pre_waitq,
				     !osp_precreate_running(d) ||
				     osp_precreate_near_empty(&env, d) ||
				     osp_statfs_need_update(d) ||
				     d->opd_got_disconnected, &lwi);

			if (!osp_precreate_running(d))
				break;

			/* something happened to the connection
			 * have to start from the beginning */
			if (d->opd_got_disconnected)
				break;

			if (osp_statfs_need_update(d))
				osp_statfs_update(d);

			/* To avoid handling different seq in precreate/orphan
			 * cleanup, it will hold precreate until current seq is
			 * used up. */
			if (unlikely(osp_precreate_end_seq(&env, d) &&
			    !osp_create_end_seq(&env, d)))
				continue;

			if (unlikely(osp_precreate_end_seq(&env, d) &&
				     osp_create_end_seq(&env, d))) {
				/* current sequence fully consumed: roll
				 * over to a fresh one */
				LCONSOLE_INFO("%s:"LPX64" is used up."
					      " Update to new seq\n",
					      d->opd_obd->obd_name,
					 fid_seq(&d->opd_pre_last_created_fid));
				rc = osp_precreate_rollover_new_seq(&env, d);
				if (rc)
					continue;
			}

			if (osp_precreate_near_empty(&env, d)) {
				rc = osp_precreate_send(&env, d);
				/* osp_precreate_send() sets opd_pre_status
				 * in case of error, that prevent the using of
				 * failed device. */
				if (rc < 0 && rc != -ENOSPC &&
				    rc != -ETIMEDOUT && rc != -ENOTCONN)
					CERROR("%s: cannot precreate objects:"
					       " rc = %d\n",
					       d->opd_obd->obd_name, rc);
			}
		}
	}

	/* announce that the thread has stopped */
	thread->t_flags = SVC_STOPPED;
	lu_env_fini(&env);
	wake_up(&thread->t_ctl_waitq);

	RETURN(0);
}
int seq_server_init(const struct lu_env *env, struct lu_server_seq *seq, struct dt_device *dev, const char *prefix, enum lu_mgr_type type, struct seq_server_site *ss) { int rc, is_srv = (type == LUSTRE_SEQ_SERVER); ENTRY; LASSERT(dev != NULL); LASSERT(prefix != NULL); LASSERT(ss != NULL); LASSERT(ss->ss_lu != NULL); /* A compile-time check for FIDs that used to be in lustre_idl.h * but is moved here to remove CLASSERT/LASSERT in that header. * Check all lu_fid fields are converted in fid_cpu_to_le() and friends * and that there is no padding added by compiler to the struct. */ { struct lu_fid tst; CLASSERT(sizeof(tst) == sizeof(tst.f_seq) + sizeof(tst.f_oid) + sizeof(tst.f_ver)); } seq->lss_cli = NULL; seq->lss_type = type; seq->lss_site = ss; lu_seq_range_init(&seq->lss_space); lu_seq_range_init(&seq->lss_lowater_set); lu_seq_range_init(&seq->lss_hiwater_set); seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH; mutex_init(&seq->lss_mutex); seq->lss_width = is_srv ? LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH; snprintf(seq->lss_name, sizeof(seq->lss_name), "%s-%s", (is_srv ? "srv" : "ctl"), prefix); rc = seq_store_init(seq, env, dev); if (rc) GOTO(out, rc); /* Request backing store for saved sequence info. */ rc = seq_store_read(seq, env); if (rc == -ENODATA) { /* Nothing is read, init by default value. */ seq->lss_space = is_srv ? LUSTRE_SEQ_ZERO_RANGE: LUSTRE_SEQ_SPACE_RANGE; seq->lss_space.lsr_index = ss->ss_node_id; LCONSOLE_INFO("%s: No data found " "on store. 
Initialize space\n", seq->lss_name); rc = seq_store_update(env, seq, NULL, 0); if (rc) { CERROR("%s: Can't write space data, " "rc %d\n", seq->lss_name, rc); } } else if (rc) { CERROR("%s: Can't read space data, rc %d\n", seq->lss_name, rc); GOTO(out, rc); } if (is_srv) { LASSERT(lu_seq_range_is_sane(&seq->lss_space)); } else { LASSERT(!lu_seq_range_is_zero(&seq->lss_space) && lu_seq_range_is_sane(&seq->lss_space)); } rc = seq_server_proc_init(seq); if (rc) GOTO(out, rc); EXIT; out: if (rc) seq_server_fini(seq, env); return rc; }
/*
 * Convert a legacy /ROOT directory to the current FID scheme.
 *
 * Accepts IGIF and normal-sequence root FIDs untouched; otherwise the
 * root FID must be FID_SEQ_ROOT.  When the LMA on /ROOT does not match
 * the root FID, runs the multi-step conversion (FLDB entry, removing
 * ./.., fixing children, rewriting the LMA) — expected only on
 * pre-production ZFS backends.  Safe to retry: correct LMA is written
 * last, so an interrupted conversion restarts on next mount.
 */
int mdd_compat_fixes(const struct lu_env *env, struct mdd_device *mdd)
{
	struct mdd_thread_info *info = mdd_env_info(env);
	struct mdd_object *root;
	struct dt_object *o;
	struct lustre_mdt_attrs *lma;
	struct lu_buf buf;
	int rc;
	ENTRY;

	/* IGIF FIDS are valid for old 1.8 and 2.[123] ROOT and are kept.
	 * Normal FIDs used by Xyratex 1.8->2.1 upgrade tool are also kept. */
	if (fid_is_igif(&mdd->mdd_root_fid) || fid_is_norm(&mdd->mdd_root_fid))
		RETURN(0);

	/*
	 * FID is supposed to be FID_SEQ_ROOT for:
	 *  - new ldiskfs fs
	 *  - new ZFS fs
	 *  - old ZFS fs, by now processed with osd_convert_root_to_new_seq()
	 */
	if (fid_seq(&mdd->mdd_root_fid) != FID_SEQ_ROOT) {
		CERROR("%s: wrong FID "DFID" is used for /ROOT\n",
		       mdd2obd_dev(mdd)->obd_name,
		       PFID(&mdd->mdd_root_fid));
		RETURN(-EINVAL);
	}

	root = mdd_object_find(env, mdd, &mdd->mdd_root_fid);
	if (IS_ERR(root))
		RETURN(PTR_ERR(root));
	o = mdd_object_child(root);
	CDEBUG(D_OTHER, "/ROOT = "DFID"\n", PFID(&mdd->mdd_root_fid));

	if (dt_try_as_dir(env, o) == 0) {
		CERROR("%s: not a directory\n", mdd2obd_dev(mdd)->obd_name);
		GOTO(out, rc = -ENOTDIR);
	}

	/* read the old-size LMA from /ROOT into the per-env scratch buffer */
	lma = (struct lustre_mdt_attrs *)&info->mti_xattr_buf;
	CLASSERT(sizeof(info->mti_xattr_buf) >= LMA_OLD_SIZE);
	buf.lb_len = LMA_OLD_SIZE;
	buf.lb_buf = lma;
	rc = mdo_xattr_get(env, root, &buf, XATTR_NAME_LMA, BYPASS_CAPA);
	if (rc < 0 && rc != -ENODATA) {
		CERROR("%s: can't fetch LMA: rc = %d\n",
		       mdd2obd_dev(mdd)->obd_name, rc);
		GOTO(out, rc);
	}

	/* NOTE(review): when mdo_xattr_get() returned -ENODATA the buffer
	 * holds whatever was left in mti_xattr_buf, and it is still swabbed
	 * and compared below — presumably a stale/zero buffer never matches
	 * the root FID, so conversion proceeds; verify this assumption. */
	lustre_lma_swab(lma);
	if (lu_fid_eq(&lma->lma_self_fid, &mdd->mdd_root_fid)) {
		/* /ROOT has been converted already
		 * or was correct from the beginning */
		CDEBUG(D_OTHER, "%s: converted already\n",
		       mdd2obd_dev(mdd)->obd_name);
		GOTO(out, rc = 0);
	}

	/* this is supposed to happen only on pre-production ZFS backend */
	if (strcmp(mdd->mdd_bottom->dd_lu_dev.ld_type->ldt_name,
		   LUSTRE_OSD_ZFS_NAME) != 0) {
		CERROR("%s: "DFID" is used on ldiskfs?!\n",
		       mdd2obd_dev(mdd)->obd_name,
		       PFID(&mdd->mdd_root_fid));
		GOTO(out, rc = -ENOTSUPP);
	}

	LCONSOLE_INFO("%s: FID of /ROOT has been changed. "
		      "Please remount the clients.\n",
		      mdd2obd_dev(mdd)->obd_name);

	/* Fill FLDB first */
	rc = mdd_fill_fldb(env, mdd);
	if (rc)
		GOTO(out, rc);

	/* remove ./.. from /ROOT */
	rc = mdd_convert_remove_dots(env, mdd, root);
	if (rc)
		GOTO(out, rc);

	/* go over the directory, fix all the objects */
	rc = mdd_fix_children(env, mdd, o);
	if (rc)
		GOTO(out, rc);

	/* Update LMA on /ROOT. Done for simplicity in MDD, not in osd-zfs.
	 * Correct LMA will imply the whole directory has been coverted
	 * successfully, otherwise it will be retried on next mount. */
	rc = mdd_convert_lma(env, mdd, root);

out:
	mdd_object_put(env, root);
	RETURN(rc);
}