struct lustre_sb_info *lustre_init_lsi(struct super_block *sb) { struct lustre_sb_info *lsi; OBD_ALLOC_PTR(lsi); if (!lsi) return NULL; OBD_ALLOC_PTR(lsi->lsi_lmd); if (!lsi->lsi_lmd) { OBD_FREE_PTR(lsi); return NULL; } lsi->lsi_lmd->lmd_exclude_count = 0; lsi->lsi_lmd->lmd_recovery_time_soft = 0; lsi->lsi_lmd->lmd_recovery_time_hard = 0; s2lsi_nocast(sb) = lsi; /* we take 1 extra ref for our setup */ atomic_set(&lsi->lsi_mounts, 1); /* Default umount style */ lsi->lsi_flags = LSI_UMOUNT_FAILOVER; return lsi; }
static struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec, struct vfs_cred *vcred) { struct ptlrpc_cli_ctx *ctx; struct gss_cli_ctx_keyring *gctx_kr; OBD_ALLOC_PTR(gctx_kr); if (gctx_kr == NULL) return NULL; OBD_ALLOC_PTR(gctx_kr->gck_timer); if (gctx_kr->gck_timer == NULL) { OBD_FREE_PTR(gctx_kr); return NULL; } init_timer(gctx_kr->gck_timer); ctx = &gctx_kr->gck_base.gc_base; if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) { OBD_FREE_PTR(gctx_kr->gck_timer); OBD_FREE_PTR(gctx_kr); return NULL; } ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT; clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags); atomic_inc(&ctx->cc_refcount); /* for the caller */ return ctx; }
static struct lu_device *lov_device_alloc(const struct lu_env *env, struct lu_device_type *t, struct lustre_cfg *cfg) { struct lu_device *d; struct lov_device *ld; struct obd_device *obd; int rc; OBD_ALLOC_PTR(ld); if (ld == NULL) return ERR_PTR(-ENOMEM); cl_device_init(&ld->ld_cl, t); d = lov2lu_dev(ld); d->ld_ops = &lov_lu_ops; ld->ld_cl.cd_ops = &lov_cl_ops; mutex_init(&ld->ld_mutex); lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class); /* setup the LOV OBD */ obd = class_name2obd(lustre_cfg_string(cfg, 0)); LASSERT(obd != NULL); rc = lov_setup(obd, cfg); if (rc) { lov_device_free(env, d); return ERR_PTR(rc); } ld->ld_lov = &obd->u.lov; return d; }
/** * create fld cache. */ struct fld_cache *fld_cache_init(const char *name, int cache_size, int cache_threshold) { struct fld_cache *cache; ENTRY; LASSERT(name != NULL); LASSERT(cache_threshold < cache_size); OBD_ALLOC_PTR(cache); if (cache == NULL) RETURN(ERR_PTR(-ENOMEM)); INIT_LIST_HEAD(&cache->fci_entries_head); INIT_LIST_HEAD(&cache->fci_lru); cache->fci_cache_count = 0; rwlock_init(&cache->fci_lock); strlcpy(cache->fci_name, name, sizeof(cache->fci_name)); cache->fci_cache_size = cache_size; cache->fci_threshold = cache_threshold; /* Init fld cache info. */ memset(&cache->fci_stat, 0, sizeof(cache->fci_stat)); CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n", cache->fci_name, cache_size, cache_threshold); RETURN(cache); }
/**
 * Send a DONE_WRITING rpc.
 *
 * Notifies the MDS that this client has finished writing to @inode.  Only
 * meaningful when Size-on-MDS (SOM) is negotiated with the MDT (asserted
 * below).  Errors are logged but not propagated: this is a best-effort
 * notification with no return value.
 */
static void ll_done_writing(struct inode *inode)
{
	struct obd_client_handle *och = NULL;
	struct md_op_data *op_data;
	int rc;

	LASSERT(exp_connect_som(ll_i2mdexp(inode)));

	OBD_ALLOC_PTR(op_data);
	if (op_data == NULL) {
		/* best-effort: nothing to clean up, just bail */
		CERROR("can't allocate op_data\n");
		return;
	}

	ll_prepare_done_writing(inode, op_data, &och);
	/* If there is no @och, we do not do D_W yet. */
	if (och == NULL)
		GOTO(out, 0);

	rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
	if (rc == -EAGAIN) {
		/* MDS has instructed us to obtain Size-on-MDS attribute from
		 * OSTs and send setattr to back to MDS. */
		rc = ll_som_update(inode, op_data);
	} else if (rc) {
		CERROR("inode %lu mdc done_writing failed: rc = %d\n",
		       inode->i_ino, rc);
	}
out:
	ll_finish_md_op_data(op_data);
	/* drop the open-replay bookkeeping for the handle we consumed */
	if (och) {
		md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
		OBD_FREE_PTR(och);
	}
}
static struct lu_device *osc_device_alloc(const struct lu_env *env, struct lu_device_type *t, struct lustre_cfg *cfg) { struct lu_device *d; struct osc_device *od; struct obd_device *obd; int rc; OBD_ALLOC_PTR(od); if (od == NULL) RETURN(ERR_PTR(-ENOMEM)); cl_device_init(&od->od_cl, t); d = osc2lu_dev(od); d->ld_ops = &osc_lu_ops; od->od_cl.cd_ops = &osc_cl_ops; /* Setup OSC OBD */ obd = class_name2obd(lustre_cfg_string(cfg, 0)); LASSERT(obd != NULL); rc = osc_setup(obd, cfg); if (rc) { osc_device_free(env, d); RETURN(ERR_PTR(rc)); } od->od_exp = obd->obd_self_export; RETURN(d); }
/*
 * Orphan-cleanup thread body: iterate the orphan index and process each
 * entry in a private lu_env.
 *
 * \param args  a struct mdd_generic_thread describing this worker
 *
 * \retval 0 on success, negative errno on env setup or iteration failure.
 * mgt_started is completed as soon as the thread runs; mgt_finished is
 * always completed on exit, even on error.
 */
static int __mdd_orphan_cleanup(void *args)
{
	struct mdd_generic_thread *thread = (struct mdd_generic_thread *)args;
	struct lu_env *env = NULL;
	int rc;
	ENTRY;

	/* let the spawner know we are running before doing any work */
	complete(&thread->mgt_started);

	OBD_ALLOC_PTR(env);
	if (env == NULL)
		GOTO(out, rc = -ENOMEM);

	rc = lu_env_init(env, LCT_MD_THREAD);
	if (rc)
		GOTO(out, rc);

	rc = orph_index_iterate(env, thread);

	lu_env_fini(env);
	GOTO(out, rc);
out:
	if (env)
		OBD_FREE_PTR(env);
	/* always signal completion so the spawner never blocks forever */
	complete(&thread->mgt_finished);
	return rc;
}
/* * Allocate quota master target and initialize it. * * \param env - is the environment passed by the caller * \param ldt - is the device type structure associated with the qmt * \param cfg - is the configuration record used to configure the qmt * * \retval - lu_device structure associated with the qmt on success, * appropriate error on failure */ static struct lu_device *qmt_device_alloc(const struct lu_env *env, struct lu_device_type *ldt, struct lustre_cfg *cfg) { struct qmt_device *qmt; struct lu_device *ld; int rc; ENTRY; /* allocate qmt device */ OBD_ALLOC_PTR(qmt); if (qmt == NULL) RETURN(ERR_PTR(-ENOMEM)); /* configure lu/dt_device */ ld = qmt2lu_dev(qmt); dt_device_init(&qmt->qmt_dt_dev, ldt); ld->ld_ops = &qmt_lu_ops; /* initialize qmt device */ rc = qmt_device_init0(env, qmt, ldt, cfg); if (rc != 0) { qmt_device_free(env, ld); RETURN(ERR_PTR(rc)); } RETURN(ld); }
int ll_set_dd(struct dentry *de) { ENTRY; LASSERT(de != NULL); CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n", de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode, atomic_read(&de->d_count)); if (de->d_fsdata == NULL) { struct ll_dentry_data *lld; OBD_ALLOC_PTR(lld); if (likely(lld != NULL)) { CFS_INIT_LIST_HEAD(&lld->lld_sa_alias); lock_dentry(de); if (likely(de->d_fsdata == NULL)) de->d_fsdata = lld; else OBD_FREE_PTR(lld); unlock_dentry(de); } else { RETURN(-ENOMEM); } } RETURN(0); }
/*
 * Record an object unlink in the MDS->OST origin llog.
 *
 * \param obd       MDS obd device
 * \param lsm       stripe metadata of the unlinked object
 * \param count     number of objects covered by this record
 * \param logcookie array receiving the generated llog cookies
 * \param cookies   capacity of @logcookie; must cover every stripe
 *
 * \retval 0 on success, -EFBIG when the cookie array is too small,
 *         -ENOMEM on allocation failure, or the llog_add() result.
 */
static int mds_llog_add_unlink(struct obd_device *obd,
			       struct lov_stripe_md *lsm, obd_count count,
			       struct llog_cookie *logcookie, int cookies)
{
	struct llog_unlink_rec *lur;
	struct llog_ctxt *ctxt;
	int rc;

	/* need one cookie slot per stripe */
	if (cookies < lsm->lsm_stripe_count)
		RETURN(rc = -EFBIG);

	/* first prepare unlink log record */
	OBD_ALLOC_PTR(lur);
	if (!lur)
		RETURN(rc = -ENOMEM);
	lur->lur_hdr.lrh_len = lur->lur_tail.lrt_len = sizeof(*lur);
	lur->lur_hdr.lrh_type = MDS_UNLINK_REC;
	lur->lur_count = count;

	/* NOTE(review): llog_get_context() can presumably return NULL when
	 * the context isn't set up; confirm llog_add()/llog_ctxt_put()
	 * tolerate a NULL ctxt here. */
	ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
	rc = llog_add(ctxt, &lur->lur_hdr, lsm, logcookie, cookies);
	llog_ctxt_put(ctxt);

	OBD_FREE_PTR(lur);
	RETURN(rc);
}
int client_fid_init(struct obd_device *obd, struct obd_export *exp, enum lu_cli_type type) { struct client_obd *cli = &obd->u.cli; char *prefix; int rc; ENTRY; OBD_ALLOC_PTR(cli->cl_seq); if (cli->cl_seq == NULL) RETURN(-ENOMEM); OBD_ALLOC(prefix, MAX_OBD_NAME + 5); if (prefix == NULL) GOTO(out_free_seq, rc = -ENOMEM); snprintf(prefix, MAX_OBD_NAME + 5, "cli-%s", obd->obd_name); /* Init client side sequence-manager */ rc = seq_client_init(cli->cl_seq, exp, type, prefix, NULL); OBD_FREE(prefix, MAX_OBD_NAME + 5); if (rc) GOTO(out_free_seq, rc); RETURN(rc); out_free_seq: OBD_FREE_PTR(cli->cl_seq); cli->cl_seq = NULL; return rc; }
/*
 * Write a global record
 *
 * \param env - is the environment passed by the caller
 * \param obj - is the on-disk global index to be updated
 * \param id  - index to be updated
 * \param rec - record to be written
 */
int lquota_disk_write_glb(const struct lu_env *env, struct dt_object *obj,
			  __u64 id, struct lquota_glb_rec *rec)
{
	struct dt_device *dev = lu2dt_dev(obj->do_lu.lo_dev);
	struct thandle *th;
	struct dt_key *key = (struct dt_key *)&id;
	int rc;
	ENTRY;

	th = dt_trans_create(env, dev);
	if (IS_ERR(th))
		RETURN(PTR_ERR(th));

	/* the entry with 0 key can always be found in IAM file. */
	if (id == 0) {
		/* key 0 may already exist, so also reserve a delete */
		rc = dt_declare_delete(env, obj, key, th);
		if (rc)
			GOTO(out, rc);
	}

	rc = dt_declare_insert(env, obj, (struct dt_rec *)rec, key, th);
	if (rc)
		GOTO(out, rc);

	rc = dt_trans_start_local(env, dev, th);
	if (rc)
		GOTO(out, rc);

	/* serialize index updates against concurrent writers */
	dt_write_lock(env, obj, 0);

	if (id == 0) {
		struct lquota_glb_rec *tmp;

		/* scratch record used only to probe for an existing key-0
		 * entry; its contents are discarded */
		OBD_ALLOC_PTR(tmp);
		if (tmp == NULL)
			GOTO(out_lock, rc = -ENOMEM);

		rc = dt_lookup(env, obj, (struct dt_rec *)tmp, key,
			       BYPASS_CAPA);
		OBD_FREE_PTR(tmp);
		if (rc == 0) {
			/* delete the stale record before re-inserting */
			rc = dt_delete(env, obj, key, th, BYPASS_CAPA);
			if (rc)
				GOTO(out_lock, rc);
		}
		/* a lookup miss is not an error here */
		rc = 0;
	}

	rc = dt_insert(env, obj, (struct dt_rec *)rec, key, th, BYPASS_CAPA,
		       1);
out_lock:
	dt_write_unlock(env, obj);
out:
	dt_trans_stop(env, dev, th);
	RETURN(rc);
}
/*
 * Allocate a lustre_qunit_size for @lqs_key and publish it in the quota
 * context hash.  Returns the new lqs on success or ERR_PTR(): -ENOMEM on
 * allocation failure, -EBUSY when @qctxt is no longer valid, or the
 * cfs_hash_add_unique() error (e.g. key already present).
 */
static struct lustre_qunit_size *
quota_create_lqs(unsigned long long lqs_key, struct lustre_quota_ctxt *qctxt)
{
	struct lustre_qunit_size *lqs = NULL;
	cfs_hash_t *hs = NULL;
	int rc = 0;

	OBD_ALLOC_PTR(lqs);
	if (!lqs)
		GOTO(out, rc = -ENOMEM);

	lqs->lqs_key = lqs_key;

	cfs_spin_lock_init(&lqs->lqs_lock);

	lqs->lqs_bwrite_pending = 0;
	lqs->lqs_iwrite_pending = 0;
	lqs->lqs_ino_rec = 0;
	lqs->lqs_blk_rec = 0;
	/* id and user/group flag are both encoded in the key */
	lqs->lqs_id = LQS_KEY_ID(lqs->lqs_key);
	lqs->lqs_flags = LQS_KEY_GRP(lqs->lqs_key) ? LQUOTA_FLAGS_GRP : 0;
	/* inherit unit/tune sizes from the owning context */
	lqs->lqs_bunit_sz = qctxt->lqc_bunit_sz;
	lqs->lqs_iunit_sz = qctxt->lqc_iunit_sz;
	lqs->lqs_btune_sz = qctxt->lqc_btune_sz;
	lqs->lqs_itune_sz = qctxt->lqc_itune_sz;
	lqs->lqs_ctxt = qctxt;
	if (qctxt->lqc_handler) {
		lqs->lqs_last_bshrink = 0;
		lqs->lqs_last_ishrink = 0;
	}

	lqs_initref(lqs);

	/* only take a hash reference while the context is still valid */
	cfs_spin_lock(&qctxt->lqc_lock);
	if (qctxt->lqc_valid)
		hs = cfs_hash_getref(qctxt->lqc_lqs_hash);
	cfs_spin_unlock(&qctxt->lqc_lock);

	if (hs) {
		/* extra ref for the hash table's own reference */
		lqs_getref(lqs);
		rc = cfs_hash_add_unique(qctxt->lqc_lqs_hash, &lqs->lqs_key,
					 &lqs->lqs_hash);
		if (rc)
			/* insertion failed: drop the ref taken above */
			lqs_putref(lqs);
		cfs_hash_putref(hs);
	} else {
		rc = -EBUSY;
	}

out:
	/* NOTE(review): on failure lqs was never published in the hash, so
	 * the direct free should be safe — confirm lqs_putref() does not
	 * already free while the lqs_initref() reference remains. */
	if (rc && lqs)
		OBD_FREE_PTR(lqs);

	if (rc)
		return ERR_PTR(rc);
	else
		return lqs;
}
/*
 * lu_context key constructor: allocate a per-session slp_session.
 * Returns the session, or ERR_PTR(-ENOMEM) when allocation fails.
 */
void *slp_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key)
{
	struct slp_session *session;

	OBD_ALLOC_PTR(session);
	if (session != NULL)
		return session;

	return ERR_PTR(-ENOMEM);
}
int osp_init_precreate(struct osp_device *d) { struct l_wait_info lwi = { 0 }; struct task_struct *task; ENTRY; OBD_ALLOC_PTR(d->opd_pre); if (d->opd_pre == NULL) RETURN(-ENOMEM); /* initially precreation isn't ready */ d->opd_pre_status = -EAGAIN; fid_zero(&d->opd_pre_used_fid); d->opd_pre_used_fid.f_oid = 1; fid_zero(&d->opd_pre_last_created_fid); d->opd_pre_last_created_fid.f_oid = 1; d->opd_pre_reserved = 0; d->opd_got_disconnected = 1; d->opd_pre_grow_slow = 0; d->opd_pre_grow_count = OST_MIN_PRECREATE; d->opd_pre_min_grow_count = OST_MIN_PRECREATE; d->opd_pre_max_grow_count = OST_MAX_PRECREATE; spin_lock_init(&d->opd_pre_lock); init_waitqueue_head(&d->opd_pre_waitq); init_waitqueue_head(&d->opd_pre_user_waitq); init_waitqueue_head(&d->opd_pre_thread.t_ctl_waitq); /* * Initialize statfs-related things */ d->opd_statfs_maxage = 5; /* default update interval */ d->opd_statfs_fresh_till = cfs_time_shift(-1000); CDEBUG(D_OTHER, "current %llu, fresh till %llu\n", (unsigned long long)cfs_time_current(), (unsigned long long)d->opd_statfs_fresh_till); cfs_timer_init(&d->opd_statfs_timer, osp_statfs_timer_cb, d); /* * start thread handling precreation and statfs updates */ task = kthread_run(osp_precreate_thread, d, "osp-pre-%u-%u", d->opd_index, d->opd_group); if (IS_ERR(task)) { CERROR("can't start precreate thread %ld\n", PTR_ERR(task)); RETURN(PTR_ERR(task)); } l_wait_event(d->opd_pre_thread.t_ctl_waitq, osp_precreate_running(d) || osp_precreate_stopped(d), &lwi); RETURN(0); }
/**
 * Open an OI (Object Index) container.
 *
 * \param info    per-thread OSD context (provides oti_fid scratch space)
 * \param osd     OSD device owning the OI
 * \param name    Name of OI container
 * \param oi_slot Pointer of returned OI
 * \param create  create the index if it does not yet exist
 *
 * \retval 0        success
 * \retval -ve      failure
 */
static int osd_oi_open(struct osd_thread_info *info, struct osd_device *osd,
		       char *name, struct osd_oi **oi_slot, bool create)
{
	struct osd_directory *dir;
	struct iam_container *bag;
	struct inode *inode;
	struct osd_oi *oi;
	int rc;

	ENTRY;

	/* NOTE(review): oi_feat is not declared in this function, so it is
	 * shared state mutated on every call — confirm callers serialize
	 * osd_oi_open() or the keysize fields are otherwise race-free. */
	oi_feat.dif_keysize_min = sizeof(struct lu_fid);
	oi_feat.dif_keysize_max = sizeof(struct lu_fid);

	inode = osd_oi_index_open(info, osd, name, &oi_feat, create);
	if (IS_ERR(inode))
		RETURN(PTR_ERR(inode));

	/* the OI inode itself must never be looked up through the OI */
	ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NO_OI);
	/* 'What the @fid is' is not important, because these objects
	 * have no OI mappings, and only are visible inside the OSD.*/
	lu_igif_build(&info->oti_fid, inode->i_ino, inode->i_generation);
	rc = osd_ea_fid_set(info, inode, &info->oti_fid, LMAC_NOT_IN_OI, 0);
	if (rc != 0)
		GOTO(out_inode, rc);

	OBD_ALLOC_PTR(oi);
	if (oi == NULL)
		GOTO(out_inode, rc = -ENOMEM);

	oi->oi_inode = inode;
	dir = &oi->oi_dir;

	/* bind the IAM container to the backing inode, then set it up */
	bag = &dir->od_container;
	rc = iam_container_init(bag, &dir->od_descr, inode);
	if (rc < 0)
		GOTO(out_free, rc);

	rc = iam_container_setup(bag);
	if (rc < 0)
		GOTO(out_container, rc);

	*oi_slot = oi;
	RETURN(0);

out_container:
	iam_container_fini(bag);
out_free:
	OBD_FREE_PTR(oi);
out_inode:
	iput(inode);
	return rc;
}
/*
 * Process the records of a llog, either inline in the caller's context or,
 * when @fork is true (kernel builds only), in a freshly spawned kernel
 * thread that initializes its own lu_env.
 *
 * \param env       caller environment; used only when processing inline
 * \param loghandle llog to iterate over
 * \param cb        callback invoked per record
 * \param data      opaque cookie passed to @cb
 * \param catdata   opaque cookie for catalog processing
 * \param fork      run processing in a separate thread when possible
 *
 * \retval 0 on success, negative errno on failure
 */
int llog_process_or_fork(const struct lu_env *env,
			 struct llog_handle *loghandle,
			 llog_cb_t cb, void *data, void *catdata, bool fork)
{
	struct llog_process_info *lpi;
	int rc;

	ENTRY;

	OBD_ALLOC_PTR(lpi);
	if (lpi == NULL) {
		CERROR("cannot alloc pointer\n");
		RETURN(-ENOMEM);
	}
	lpi->lpi_loghandle = loghandle;
	lpi->lpi_cb = cb;
	lpi->lpi_cbdata = data;
	lpi->lpi_catdata = catdata;

#ifdef __KERNEL__
	if (fork) {
		struct task_struct *task;

		/* The new thread can't use parent env,
		 * init the new one in llog_process_thread_daemonize. */
		lpi->lpi_env = NULL;
		init_completion(&lpi->lpi_completion);
		task = kthread_run(llog_process_thread_daemonize, lpi,
				   "llog_process_thread");
		if (IS_ERR(task)) {
			rc = PTR_ERR(task);
			CERROR("%s: cannot start thread: rc = %d\n",
			       loghandle->lgh_ctxt->loc_obd->obd_name, rc);
			GOTO(out_lpi, rc);
		}
		/* block until the worker stores its result in lpi_rc */
		wait_for_completion(&lpi->lpi_completion);
	} else {
		lpi->lpi_env = env;
		llog_process_thread(lpi);
	}
#else
	/* userspace build: always process inline */
	lpi->lpi_env = env;
	llog_process_thread(lpi);
#endif
	rc = lpi->lpi_rc;
#ifdef __KERNEL__
out_lpi:
#endif
	OBD_FREE_PTR(lpi);
	RETURN(rc);
}
/* helper functions for calling the llog obd methods */
/*
 * Allocate a fresh llog context bound to @obd, with one reference held
 * for the caller.  Returns NULL on allocation failure.
 */
static struct llog_ctxt *llog_new_ctxt(struct obd_device *obd)
{
	struct llog_ctxt *ctxt;

	OBD_ALLOC_PTR(ctxt);
	if (ctxt == NULL)
		return NULL;

	ctxt->loc_obd = obd;
	atomic_set(&ctxt->loc_refcount, 1);

	return ctxt;
}
static struct llu_io_group * get_io_group(struct inode *inode, int maxpages, struct lustre_rw_params *params) { struct llu_io_group *group; OBD_ALLOC_PTR(group); if (!group) return ERR_PTR(-ENOMEM); group->lig_params = params; return group; }
/*
 * Allocate a remote-ACL control entry for process @key performing @ops.
 * Returns NULL on allocation failure.
 */
static struct rmtacl_ctl_entry *rce_alloc(pid_t key, int ops)
{
	struct rmtacl_ctl_entry *entry;

	OBD_ALLOC_PTR(entry);
	if (entry == NULL)
		return NULL;

	INIT_LIST_HEAD(&entry->rce_list);
	entry->rce_ops = ops;
	entry->rce_key = key;

	return entry;
}
/*
 * Allocate a new log or catalog handle
 * Used inside llog_open().
 * The handle is returned with its locks initialized and one reference
 * held for the caller; NULL on allocation failure.
 */
struct llog_handle *llog_alloc_handle(void)
{
	struct llog_handle *handle;

	OBD_ALLOC_PTR(handle);
	if (handle == NULL)
		return NULL;

	init_rwsem(&handle->lgh_lock);
	spin_lock_init(&handle->lgh_hdr_lock);
	INIT_LIST_HEAD(&handle->u.phd.phd_entry);
	atomic_set(&handle->lgh_refcount, 1);

	return handle;
}
/* * directory structure on legacy OST: * * O/<seq>/d0-31/<objid> * O/<seq>/LAST_ID * last_rcvd * LAST_GROUP * CONFIGS * */ int osd_compat_init(struct osd_device *dev) { struct lvfs_run_ctxt new; struct lvfs_run_ctxt save; struct dentry *rootd = osd_sb(dev)->s_root; struct dentry *d; int rc; int i; ENTRY; /* to get subdir count from last_rcvd */ rc = osd_last_rcvd_subdir_count(dev); if (rc <= 0) RETURN(rc); dev->od_ost_map->subdir_count = rc; rc = 0; OBD_ALLOC_PTR(dev->od_ost_map); if (dev->od_ost_map == NULL) RETURN(-ENOMEM); LASSERT(dev->od_fsops); osd_push_ctxt(dev, &new, &save); d = simple_mkdir(rootd, dev->od_mnt, "O", 0755, 1); pop_ctxt(&save, &new, NULL); if (IS_ERR(d)) { OBD_FREE_PTR(dev->od_ost_map); RETURN(PTR_ERR(d)); } dev->od_ost_map->root = d; /* Initialize all groups */ for (i = 0; i < MAX_OBJID_GROUP; i++) { cfs_sema_init(&dev->od_ost_map->groups[i].dir_init_sem, 1); rc = osd_compat_seq_init(dev, i); if (rc) { osd_compat_fini(dev); break; } } RETURN(rc); }
struct inode *search_inode_for_lustre(struct super_block *sb, const struct lu_fid *fid) { struct ll_sb_info *sbi = ll_s2sbi(sb); struct ptlrpc_request *req = NULL; struct inode *inode = NULL; int eadatalen = 0; unsigned long hash = cl_fid_build_ino(fid, ll_need_32bit_api(sbi)); struct md_op_data *op_data; int rc; CDEBUG(D_INFO, "searching inode for:(%lu,"DFID")\n", hash, PFID(fid)); inode = ilookup5(sb, hash, ll_nfs_test_inode, (void *)fid); if (inode) return inode; rc = ll_get_max_mdsize(sbi, &eadatalen); if (rc) return ERR_PTR(rc); /* Because inode is NULL, ll_prep_md_op_data can not * be used here. So we allocate op_data ourselves */ OBD_ALLOC_PTR(op_data); if (op_data == NULL) return ERR_PTR(-ENOMEM); op_data->op_fid1 = *fid; op_data->op_mode = eadatalen; op_data->op_valid = OBD_MD_FLEASIZE; /* mds_fid2dentry ignores f_type */ rc = md_getattr(sbi->ll_md_exp, op_data, &req); OBD_FREE_PTR(op_data); if (rc) { CERROR("can't get object attrs, fid "DFID", rc %d\n", PFID(fid), rc); return ERR_PTR(rc); } rc = ll_prep_inode(&inode, req, sb, NULL); ptlrpc_req_finished(req); if (rc) return ERR_PTR(rc); return inode; }
/**
 * Initialize osd Iterator for given osd index object.
 *
 * \param dt   - osd index object
 * \param attr - not used
 * \param capa - BYPASS_CAPA
 */
static struct dt_it *osd_it_acct_init(const struct lu_env *env,
				      struct dt_object *dt, __u32 attr,
				      struct lustre_capa *capa)
{
	struct osd_thread_info *info = osd_oti_get(env);
	struct osd_it_quota *it;
	struct lu_object *lo = &dt->do_lu;
	struct osd_device *osd = osd_dev(lo->lo_dev);
	int rc;
	ENTRY;

	LASSERT(lu_object_exists(lo));

	if (info == NULL)
		RETURN(ERR_PTR(-ENOMEM));

	/* Use the iterator embedded in the thread info when it is free;
	 * if oti_it_inline is already set the slot is taken by another
	 * active iterator, so fall back to a heap allocation. */
	if (info->oti_it_inline) {
		OBD_ALLOC_PTR(it);
		if (it == NULL)
			RETURN(ERR_PTR(-ENOMEM));
	} else {
		it = &info->oti_it_quota;
		info->oti_it_inline = 1;
	}

	memset(it, 0, sizeof(*it));
	it->oiq_oid = osd_quota_fid2dmu(lu_object_fid(lo));

	/* initialize zap cursor */
	rc = osd_zap_cursor_init(&it->oiq_zc, osd->od_os, it->oiq_oid, 0);
	if (rc != 0) {
		/* undo whichever allocation strategy was chosen above */
		if (it != &info->oti_it_quota)
			OBD_FREE_PTR(it);
		else
			info->oti_it_inline = 0;
		RETURN(ERR_PTR(rc));
	}

	/* take object reference */
	lu_object_get(lo);
	it->oiq_obj = osd_dt_obj(dt);
	it->oiq_reset = 1;

	RETURN((struct dt_it *)it);
}
/*
 * Allocate an extended-ACL entry for process @key on object @fid.
 * Ownership of @header transfers to the entry.  Returns NULL on
 * allocation failure.
 */
static struct eacl_entry *ee_alloc(pid_t key, struct lu_fid *fid, int type,
				   ext_acl_xattr_header *header)
{
	struct eacl_entry *entry;

	OBD_ALLOC_PTR(entry);
	if (entry == NULL)
		return NULL;

	INIT_LIST_HEAD(&entry->ee_list);
	entry->ee_key = key;
	entry->ee_fid = *fid;
	entry->ee_type = type;
	entry->ee_acl = header;

	return entry;
}
/** Get a config log from the MGS and process it. * This func is called for both clients and servers. * Continue to process new statements appended to the logs * (whenever the config lock is revoked) until lustre_end_log * is called. * @param sb The superblock is used by the MGC to write to the local copy of * the config log * @param logname The name of the llog to replicate from the MGS * @param cfg Since the same mgc may be used to follow multiple config logs * (e.g. ost1, ost2, client), the config_llog_instance keeps the state for * this log, and is added to the mgc's list of logs to follow. */ int lustre_process_log(struct super_block *sb, char *logname, struct config_llog_instance *cfg) { struct lustre_cfg *lcfg; struct lustre_cfg_bufs *bufs; struct lustre_sb_info *lsi = s2lsi(sb); struct obd_device *mgc = lsi->lsi_mgc; int rc; LASSERT(mgc); LASSERT(cfg); OBD_ALLOC_PTR(bufs); if (bufs == NULL) return -ENOMEM; /* mgc_process_config */ lustre_cfg_bufs_reset(bufs, mgc->obd_name); lustre_cfg_bufs_set_string(bufs, 1, logname); lustre_cfg_bufs_set(bufs, 2, cfg, sizeof(*cfg)); lustre_cfg_bufs_set(bufs, 3, &sb, sizeof(sb)); lcfg = lustre_cfg_new(LCFG_LOG_START, bufs); rc = obd_process_config(mgc, sizeof(*lcfg), lcfg); lustre_cfg_free(lcfg); OBD_FREE_PTR(bufs); if (rc == -EINVAL) LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s'" "failed from the MGS (%d). Make sure this " "client and the MGS are running compatible " "versions of Lustre.\n", mgc->obd_name, logname, rc); if (rc) LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' " "failed (%d). This may be the result of " "communication errors between this node and " "the MGS, a bad configuration, or other " "errors. See the syslog for more " "information.\n", mgc->obd_name, logname, rc); /* class_obd_list(); */ return rc; }
/*
 * Import a security context for the GSS "null" mechanism.
 *
 * \param inbuf        serialized context token; must be present but its
 *                     contents are not otherwise interpreted here
 * \param gss_context  receives the newly allocated mechanism context
 *
 * \retval GSS_S_COMPLETE on success, GSS_S_FAILURE on bad input or OOM
 */
static __u32 gss_import_sec_context_null(rawobj_t *inbuf,
					 struct gss_ctx *gss_context)
{
	struct null_ctx *null_context;

	/* NOTE(review): only the token's presence is validated — inbuf data
	 * and length are never read.  Presumably the null mechanism carries
	 * no importable state; confirm no length check is required. */
	if (inbuf == NULL || inbuf->data == NULL)
		return GSS_S_FAILURE;

	OBD_ALLOC_PTR(null_context);
	if (null_context == NULL)
		return GSS_S_FAILURE;

	gss_context->internal_ctx_id = null_context;
	CDEBUG(D_SEC, "successfully imported null context\n");

	return GSS_S_COMPLETE;
}
/**
 * Add new item for parent FID verification.
 *
 * Prepare new verification item and pass it to the dedicated
 * verification thread for further processing.
 *
 * \param[in] env	execution environment
 * \param[in] fo	OFD object
 * \param[in] oa	OBDO structure with PFID
 */
static void ofd_add_inconsistency_item(const struct lu_env *env,
				       struct ofd_object *fo, struct obdo *oa)
{
	struct ofd_device *ofd = ofd_obj2dev(fo);
	struct ofd_inconsistency_item *oii;
	struct filter_fid *ff;
	bool wakeup = false;

	OBD_ALLOC_PTR(oii);
	if (oii == NULL)
		/* best-effort: silently skip verification on OOM */
		return;

	INIT_LIST_HEAD(&oii->oii_list);
	/* hold a reference on the object while it sits on the queue */
	lu_object_get(&fo->ofo_obj.do_lu);
	oii->oii_obj = fo;
	ff = &oii->oii_ff;
	ff->ff_parent.f_seq = oa->o_parent_seq;
	ff->ff_parent.f_oid = oa->o_parent_oid;
	ff->ff_parent.f_stripe_idx = oa->o_stripe_idx;
	ff->ff_layout = oa->o_layout;

	spin_lock(&ofd->ofd_inconsistency_lock);
	/* another check is already queued or done for this object */
	if (fo->ofo_pfid_checking || fo->ofo_pfid_verified) {
		spin_unlock(&ofd->ofd_inconsistency_lock);
		OBD_FREE_PTR(oii);
		return;
	}

	fo->ofo_pfid_checking = 1;
	/* only wake the worker when the queue transitions from empty */
	if (list_empty(&ofd->ofd_inconsistency_list))
		wakeup = true;
	list_add_tail(&oii->oii_list, &ofd->ofd_inconsistency_list);
	spin_unlock(&ofd->ofd_inconsistency_lock);
	if (wakeup)
		wake_up_all(&ofd->ofd_inconsistency_thread.t_ctl_waitq);

	/* XXX: When the found inconsistency exceeds some threshold,
	 *	we can trigger the LFSCK to scan part of the system
	 *	or the whole system, which depends on how to define
	 *	the threshold, a simple way maybe like that: define
	 *	the absolute value of how many inconsisteny allowed
	 *	to be repaired via self detect/repair mechanism, if
	 *	exceeded, then trigger the LFSCK to scan the layout
	 *	inconsistency within the whole system. */
}
/* * Connect a quota master to the backend OSD device. * * \param env - is the environment passed by the caller * \param qmt - is the quota master target to be connected * \param cfg - is the configuration log record from which we need to extract * the service name of the backend OSD device to connect to. * * \retval - 0 on success, appropriate error on failure */ static int qmt_connect_to_osd(const struct lu_env *env, struct qmt_device *qmt, struct lustre_cfg *cfg) { struct obd_connect_data *data = NULL; struct obd_device *obd; struct lu_device *ld = qmt2lu_dev(qmt); int rc; ENTRY; LASSERT(qmt->qmt_child_exp == NULL); OBD_ALLOC_PTR(data); if (data == NULL) GOTO(out, rc = -ENOMEM); /* look-up OBD device associated with the backend OSD device. * The MDT is kind enough to pass the OBD name in QMT configuration */ obd = class_name2obd(lustre_cfg_string(cfg, 3)); if (obd == NULL) { CERROR("%s: can't locate backend osd device: %s\n", qmt->qmt_svname, lustre_cfg_string(cfg, 3)); GOTO(out, rc = -ENOTCONN); } data->ocd_connect_flags = OBD_CONNECT_VERSION; data->ocd_version = LUSTRE_VERSION_CODE; /* connect to OSD device */ rc = obd_connect(NULL, &qmt->qmt_child_exp, obd, &obd->obd_uuid, data, NULL); if (rc) { CERROR("%s: cannot connect to osd dev %s (%d)\n", qmt->qmt_svname, obd->obd_name, rc); GOTO(out, rc); } /* initialize site (although it isn't used anywhere) and lu_device * pointer to next device */ qmt->qmt_child = lu2dt_dev(qmt->qmt_child_exp->exp_obd->obd_lu_dev); ld->ld_site = qmt->qmt_child_exp->exp_obd->obd_lu_dev->ld_site; EXIT; out: if (data) OBD_FREE_PTR(data); return rc; }
/*
 * Build a pseudo "page" descriptor for a user buffer fragment: records the
 * index, base address, offset and byte count, with both list heads ready
 * for linking.  Returns NULL on allocation failure.
 */
static struct page *llu_get_user_page(int index, void *addr, int offset,
				      int count)
{
	struct page *pg;

	OBD_ALLOC_PTR(pg);
	if (pg == NULL)
		return NULL;

	pg->index = index;
	pg->addr = addr;
	pg->_offset = offset;
	pg->_count = count;

	CFS_INIT_LIST_HEAD(&pg->list);
	CFS_INIT_LIST_HEAD(&pg->_node);

	return pg;
}