static int osd_check_lma(const struct lu_env *env, struct osd_object *obj) { struct osd_thread_info *info = osd_oti_get(env); struct lu_buf buf; int rc; struct lustre_mdt_attrs *lma; ENTRY; CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma)); lma = (struct lustre_mdt_attrs *)info->oti_buf; buf.lb_buf = lma; buf.lb_len = sizeof(info->oti_buf); rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA); if (rc > 0) { rc = 0; lustre_lma_swab(lma); if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) || CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) { CWARN("%s: unsupported incompat LMA feature(s) %#x for " "fid = "DFID"\n", osd_obj2dev(obj)->od_svname, lma->lma_incompat & ~LMA_INCOMPAT_SUPP, PFID(lu_object_fid(&obj->oo_dt.do_lu))); rc = -EOPNOTSUPP; } } else if (rc == -ENODATA) { /* haven't initialize LMA xattr */ rc = 0; } RETURN(rc); }
/* Dump the stack of @tsk.  Only the current task's stack can be shown;
 * for any other task we just warn, since show_task() is not exported. */
static void libcfs_call_trace(struct task_struct *tsk)
{
	if (tsk != current) {
		CWARN("can't show stack: kernel doesn't export show_task\n");
		return;
	}

	dump_stack();
}
/* Verify an incoming reply for the "null" security flavor.
 *
 * The null flavor performs no cryptographic processing: the reply
 * message is simply the raw reply buffer.  For early replies, however,
 * a checksum is carried in the message header and is validated here to
 * detect corruption.
 *
 * \retval 0        reply accepted
 * \retval -EINVAL  early-reply checksum mismatch
 */
static int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	__u32 cksums, cksumc;

	LASSERT(req->rq_repdata);

	/* null flavor: the reply message is the reply buffer itself */
	req->rq_repmsg = req->rq_repdata;
	req->rq_replen = req->rq_repdata_len;

	if (req->rq_early) {
		cksums = lustre_msg_get_cksum(req->rq_repdata);
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 9, 0, 0)
		/* b1_8 compatibility: old peers compute the checksum over a
		 * different layout, selected via MSGHDR_CKSUM_INCOMPAT18 */
		if (lustre_msghdr_get_flags(req->rq_reqmsg) &
		    MSGHDR_CKSUM_INCOMPAT18)
			cksumc = lustre_msg_calc_cksum(req->rq_repmsg, 0);
		else
			cksumc = lustre_msg_calc_cksum(req->rq_repmsg, 1);
#else
# warning "remove checksum compatibility support for b1_8"
		cksumc = lustre_msg_calc_cksum(req->rq_repmsg);
#endif
		if (cksumc != cksums) {
			CWARN("early reply checksum mismatch: %08x != %08x\n",
			      cksumc, cksums);
			return -EINVAL;
		}
	}
	return 0;
}
/**
 * Parses \<nidrange\> token of the syntax.
 *
 * \retval 1 if \a src parses to \<addrrange\> '@' \<net\>
 * \retval 0 otherwise
 */
static int parse_nidrange(struct lstr *src, cfs_list_t *nidlist)
{
	struct lstr addrrange;
	struct lstr net;
	struct lstr orig = *src;	/* keep full token for error report */
	struct nidrange *nr;

	/* split "<addrrange>@<net>"; anything left over is a syntax error */
	if (gettok(src, '@', &addrrange) == 0 ||
	    gettok(src, '@', &net) == 0 || src->ls_str != NULL)
		goto failed;

	nr = add_nidrange(&net, nidlist);
	if (nr == NULL || !parse_addrange(&addrrange, nr))
		goto failed;

	return 1;
failed:
	CWARN("can't parse nidrange: \"%.*s\"\n", orig.ls_len, orig.ls_str);
	return 0;
}
/**
 * Server side bulk abort. Idempotent. Not thread-safe (i.e. only
 * serialises with completion callback)
 */
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
	struct l_wait_info lwi;
	int rc;

	LASSERT(!in_interrupt());		/* might sleep */

	if (!ptlrpc_server_bulk_active(desc))	/* completed or */
		return;				/* never started */

	/* We used to poison the pages with 0xab here because we did not want to
	 * send any meaningful data over the wire for evicted clients (bug 9297)
	 * However, this is no longer safe now that we use the page cache on the
	 * OSS (bug 20560) */

	/* The unlink ensures the callback happens ASAP and is the last
	 * one.  If it fails, it must be because completion just happened,
	 * but we must still l_wait_event() in this case, to give liblustre
	 * a chance to run server_bulk_callback()*/
	mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);

	for (;;) {
		/* Network access will complete in finite time but the HUGE
		 * timeout lets us CWARN for visibility of sluggish NALs */
		lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
					   cfs_time_seconds(1), NULL, NULL);
		rc = l_wait_event(desc->bd_waitq,
				  !ptlrpc_server_bulk_active(desc), &lwi);
		/* rc == 0: callback ran, bulk is now inactive — done */
		if (rc == 0)
			return;

		/* only timeout can break out of the wait above */
		LASSERT(rc == -ETIMEDOUT);
		CWARN("Unexpectedly long timeout: desc %p\n", desc);
	}
}
int ksocknal_lib_tunables_init () { if (!*ksocknal_tunables.ksnd_typed_conns) { int rc = -EINVAL; #if SOCKNAL_VERSION_DEBUG if (*ksocknal_tunables.ksnd_protocol < 3) rc = 0; #endif if (rc != 0) { CERROR("Protocol V3.x MUST have typed connections\n"); return rc; } } if (*ksocknal_tunables.ksnd_zc_recv_min_nfrags < 2) *ksocknal_tunables.ksnd_zc_recv_min_nfrags = 2; if (*ksocknal_tunables.ksnd_zc_recv_min_nfrags > LNET_MAX_IOV) *ksocknal_tunables.ksnd_zc_recv_min_nfrags = LNET_MAX_IOV; ksocknal_tunables.ksnd_sysctl = cfs_register_sysctl_table(ksocknal_top_ctl_table, 0); if (ksocknal_tunables.ksnd_sysctl == NULL) CWARN("Can't setup /proc tunables\n"); return 0; }
static int lfsck_bookmark_load(const struct lu_env *env, struct lfsck_instance *lfsck) { loff_t pos = 0; int len = sizeof(struct lfsck_bookmark); int rc; rc = dt_record_read(env, lfsck->li_bookmark_obj, lfsck_buf_get(env, &lfsck->li_bookmark_disk, len), &pos); if (rc == 0) { struct lfsck_bookmark *bm = &lfsck->li_bookmark_ram; lfsck_bookmark_le_to_cpu(bm, &lfsck->li_bookmark_disk); if (bm->lb_magic != LFSCK_BOOKMARK_MAGIC) { CWARN("%.16s: invalid lfsck_bookmark magic " "0x%x != 0x%x\n", lfsck_lfsck2name(lfsck), bm->lb_magic, LFSCK_BOOKMARK_MAGIC); /* Process it as new lfsck_bookmark. */ rc = -ENODATA; } } else { if (rc == -EFAULT && pos == 0) /* return -ENODATA for empty lfsck_bookmark. */ rc = -ENODATA; else CERROR("%.16s: fail to load lfsck_bookmark, " "expected = %d, rc = %d\n", lfsck_lfsck2name(lfsck), len, rc); } return rc; }
int libcfs_debug_str2mask(int *mask, const char *str, int is_subsys) { const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : libcfs_debug_dbg2str; int m = 0; int matched; int n; int t; /* Allow a number for backwards compatibility */ for (n = strlen(str); n > 0; n--) if (!isspace(str[n-1])) break; matched = n; if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) { /* don't print warning for lctl set_param debug=0 or -1 */ if (m != 0 && m != -1) CWARN("You are trying to use a numerical value for the " "mask - this will be deprecated in a future " "release.\n"); *mask = m; return 0; } return cfs_str2mask(str, fn, mask, is_subsys ? 0 : D_CANTMASK, 0xffffffff); }
static int orph_key_test_and_del(const struct lu_env *env, struct mdd_device *mdd, struct lu_fid *lf, struct dt_key *key) { struct mdd_object *mdo; int rc; mdo = mdd_object_find(env, mdd, lf); if (IS_ERR(mdo)) return PTR_ERR(mdo); rc = -EBUSY; if (mdo->mod_count == 0) { CWARN("Found orphan! Delete it\n"); rc = orphan_object_destroy(env, mdo, key); } else { mdd_write_lock(env, mdo, MOR_TGT_CHILD); if (likely(mdo->mod_count > 0)) { CDEBUG(D_HA, "Found orphan, open count = %d\n", mdo->mod_count); mdo->mod_flags |= ORPHAN_OBJ; } mdd_write_unlock(env, mdo); } mdd_object_put(env, mdo); return rc; }
/* Tear down the ptlrpc network interface: free the event queue, then
 * shut down LNet.  Loops while the EQ still reports in-flight events. */
void ptlrpc_ni_fini(void)
{
	wait_queue_head_t waitq;
	struct l_wait_info lwi;
	int rc;
	int retries;

	/* Wait for the event queue to become idle since there may still be
	 * messages in flight with pending events (i.e. the fire-and-forget
	 * messages == client requests and "non-difficult" server
	 * replies */

	for (retries = 0;; retries++) {
		rc = LNetEQFree(ptlrpc_eq_h);
		switch (rc) {
		default:
			/* LNetEQFree is only expected to return 0 or -EBUSY */
			LBUG();

		case 0:
			LNetNIFini();
			return;

		case -EBUSY:
			/* stay quiet on the first pass; events usually drain
			 * within the initial 2-second wait below */
			if (retries != 0)
				CWARN("Event queue still busy\n");

			/* Wait for a bit */
			init_waitqueue_head(&waitq);
			lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
			l_wait_event(waitq, 0, &lwi);
			break;
		}
	}
	/* notreached */
}
static int mds_quota_setup(struct obd_device *obd) { struct obd_device_target *obt = &obd->u.obt; struct mds_obd *mds = &obd->u.mds; int rc; ENTRY; if (unlikely(mds->mds_quota)) { CWARN("try to reinitialize quota context!\n"); RETURN(0); } cfs_init_rwsem(&obt->obt_rwsem); obt->obt_qfmt = LUSTRE_QUOTA_V2; mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2; cfs_sema_init(&obt->obt_quotachecking, 1); /* initialize quota master and quota context */ cfs_init_rwsem(&mds->mds_qonoff_sem); rc = qctxt_init(obd, dqacq_handler); if (rc) { CERROR("%s: initialize quota context failed! (rc:%d)\n", obd->obd_name, rc); RETURN(rc); } mds->mds_quota = 1; RETURN(rc); }
/** * \retval +ve: the lfsck_namespace is broken, the caller should reset it. * \retval 0: succeed. * \retval -ve: failed cases. */ static int lfsck_namespace_load(const struct lu_env *env, struct lfsck_component *com) { int len = com->lc_file_size; int rc; rc = dt_xattr_get(env, com->lc_obj, lfsck_buf_get(env, com->lc_file_disk, len), XATTR_NAME_LFSCK_NAMESPACE, BYPASS_CAPA); if (rc == len) { struct lfsck_namespace *ns = com->lc_file_ram; lfsck_namespace_le_to_cpu(ns, (struct lfsck_namespace *)com->lc_file_disk); if (ns->ln_magic != LFSCK_NAMESPACE_MAGIC) { CWARN("%s: invalid lfsck_namespace magic %#x != %#x\n", lfsck_lfsck2name(com->lc_lfsck), ns->ln_magic, LFSCK_NAMESPACE_MAGIC); rc = 1; } else { rc = 0; } } else if (rc != -ENODATA) { CERROR("%s: fail to load lfsck_namespace: expected = %d, " "rc = %d\n", lfsck_lfsck2name(com->lc_lfsck), len, rc); if (rc >= 0) rc = 1; } return rc; }
/* temporary for testing */ static int mdc_wr_kuc(struct file *file, const char *buffer, unsigned long count, void *data) { struct obd_device *obd = data; struct kuc_hdr *lh; struct hsm_action_list *hal; struct hsm_action_item *hai; int len; int fd, rc; ENTRY; rc = lprocfs_write_helper(buffer, count, &fd); if (rc) RETURN(rc); if (fd < 0) RETURN(-ERANGE); CWARN("message to fd %d\n", fd); len = sizeof(*lh) + sizeof(*hal) + MTI_NAME_MAXLEN + /* for mockup below */ 2 * cfs_size_round(sizeof(*hai)); OBD_ALLOC(lh, len); lh->kuc_magic = KUC_MAGIC; lh->kuc_transport = KUC_TRANSPORT_HSM; lh->kuc_msgtype = HMT_ACTION_LIST; lh->kuc_msglen = len; hal = (struct hsm_action_list *)(lh + 1); hal->hal_version = HAL_VERSION; hal->hal_archive_id = 1; hal->hal_flags = 0; obd_uuid2fsname(hal->hal_fsname, obd->obd_name, MTI_NAME_MAXLEN); /* mock up an action list */ hal->hal_count = 2; hai = hai_zero(hal); hai->hai_action = HSMA_ARCHIVE; hai->hai_fid.f_oid = 5; hai->hai_len = sizeof(*hai); hai = hai_next(hai); hai->hai_action = HSMA_RESTORE; hai->hai_fid.f_oid = 10; hai->hai_len = sizeof(*hai); /* This works for either broadcast or unicast to a single fd */ if (fd == 0) { rc = libcfs_kkuc_group_put(KUC_GRP_HSM, lh); } else { cfs_file_t *fp = cfs_get_fd(fd); rc = libcfs_kkuc_msg_put(fp, lh); cfs_put_file(fp); } OBD_FREE(lh, len); if (rc < 0) RETURN(rc); RETURN(count); }
/** * Lookup method for "fid" object. Only filenames with correct SEQ:OID format * are valid. We also check if object with passed fid exists or not. */ static int obf_lookup(const struct lu_env *env, struct md_object *p, const struct lu_name *lname, struct lu_fid *f, struct md_op_spec *spec) { char *name = (char *)lname->ln_name; struct mdd_device *mdd = mdo2mdd(p); struct mdd_object *child; int rc = 0; while (*name == '[') name++; sscanf(name, SFID, RFID(f)); if (!fid_is_sane(f)) { CWARN("%s: Trying to lookup invalid FID [%s] in %s/%s, FID " "format should be "DFID"\n", mdd2obd_dev(mdd)->obd_name, lname->ln_name, dot_lustre_name, mdd_obf_dir_name, (__u64)FID_SEQ_NORMAL, 1, 0); GOTO(out, rc = -EINVAL); } if (!fid_is_norm(f) && !fid_is_igif(f) && !fid_is_root(f) && !fid_seq_is_dot(f->f_seq)) { CWARN("%s: Trying to lookup invalid FID "DFID" in %s/%s, " "sequence should be >= "LPX64" or within ["LPX64"," ""LPX64"].\n", mdd2obd_dev(mdd)->obd_name, PFID(f), dot_lustre_name, mdd_obf_dir_name, (__u64)FID_SEQ_NORMAL, (__u64)FID_SEQ_IGIF, (__u64)FID_SEQ_IGIF_MAX); GOTO(out, rc = -EINVAL); } /* Check if object with this fid exists */ child = mdd_object_find(env, mdd, f); if (child == NULL) GOTO(out, rc = 0); if (IS_ERR(child)) GOTO(out, rc = PTR_ERR(child)); if (mdd_object_exists(child) == 0) rc = -ENOENT; mdd_object_put(env, child); out: return rc; }
static int llog_catinfo_cb(struct llog_handle *cat, struct llog_rec_hdr *rec, void *data) { static char *out = NULL; static int remains = 0; struct llog_ctxt *ctxt = NULL; struct llog_handle *handle = NULL; struct llog_logid *logid; struct llog_logid_rec *lir; int l, rc, index, count = 0; struct cb_data *cbd = (struct cb_data*)data; ENTRY; if (cbd->init) { out = cbd->out; remains = cbd->remains; cbd->init = 0; } if (!(cat->lgh_hdr->llh_flags & LLOG_F_IS_CAT)) RETURN(-EINVAL); if (!cbd->ctxt) RETURN(-ENODEV); lir = (struct llog_logid_rec *)rec; logid = &lir->lid_id; rc = llog_create(ctxt, &handle, logid, NULL); if (rc) RETURN(-EINVAL); rc = llog_init_handle(handle, 0, NULL); if (rc) GOTO(out_close, rc); for (index = 1; index < (LLOG_BITMAP_BYTES * 8); index++) { if (ext2_test_bit(index, handle->lgh_hdr->llh_bitmap)) count++; } l = snprintf(out, remains, "\t[Log ID]: #"LPX64"#"LPX64"#%08x\n" "\tLog Size: %llu\n\tLast Index: %d\n" "\tUncanceled Records: %d\n", logid->lgl_oid, logid->lgl_oseq, logid->lgl_ogen, i_size_read(handle->lgh_file->f_dentry->d_inode), handle->lgh_last_idx, count); out += l; remains -= l; cbd->out = out; cbd->remains = remains; if (remains <= 0) { CWARN("Not enough memory\n"); rc = -ENOMEM; } EXIT; out_close: llog_close(handle); return rc; }
/**
 * Initialize the state descriptor for the specified hash algorithm.
 *
 * An internal routine to allocate the hash-specific state in \a hdesc for
 * use with cfs_crypto_hash_digest() to compute the hash of a single message,
 * though possibly in multiple chunks.  The descriptor internal state should
 * be freed with cfs_crypto_hash_final().
 *
 * \param[in]  hash_alg hash algorithm id (CFS_HASH_ALG_*)
 * \param[out] type     pointer to the hash description in hash_types[] array
 * \param[in,out] req   ahash request to be initialized
 * \param[in]  key      initial hash value/state, NULL to use default value
 * \param[in]  key_len  length of \a key
 *
 * \retval              0 on success
 * \retval              negative errno on failure
 */
static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
				 const struct cfs_crypto_hash_type **type,
				 struct ahash_request **req,
				 unsigned char *key, unsigned int key_len)
{
	struct crypto_ahash *tfm;
	int err = 0;

	*type = cfs_crypto_hash_type(hash_alg);
	if (*type == NULL) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      hash_alg, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}

	/* allocate the transform; CRYPTO_ALG_ASYNC allows hardware offload */
	tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(tfm);
	}

	*req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!*req) {
		CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
		       (*type)->cht_name);
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(*req, 0, NULL, NULL);

	/* key the hash: caller-provided key wins, otherwise fall back to the
	 * algorithm's default key from the hash type table (if any) */
	if (key)
		err = crypto_ahash_setkey(tfm, key, key_len);
	else if ((*type)->cht_key != 0)
		err = crypto_ahash_setkey(tfm,
					  (unsigned char *)&((*type)->cht_key),
					  (*type)->cht_size);

	if (err != 0) {
		/* free both the request and the transform on setkey failure */
		ahash_request_free(*req);
		crypto_free_ahash(tfm);
		return err;
	}

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
	       cfs_crypto_hash_speeds[hash_alg]);

	err = crypto_ahash_init(*req);
	if (err) {
		ahash_request_free(*req);
		crypto_free_ahash(tfm);
	}
	return err;
}
/* Register the RapidArray LND sysctl table.  Failure to publish the
 * /proc tunables is non-fatal and only warned about. */
int
kranal_tunables_init ()
{
	kranal_tunables.kra_sysctl =
		cfs_register_sysctl_table(kranal_top_ctl_table, 0);

	if (kranal_tunables.kra_sysctl == NULL)
		CWARN("Can't setup /proc tunables\n");

	return 0;
}
/* Register the Quadrics LND sysctl table.  Failure to publish the
 * /proc tunables is non-fatal and only warned about. */
int
kqswnal_tunables_init ()
{
	kqswnal_tunables.kqn_sysctl =
		register_sysctl_table(kqswnal_top_ctl_table);

	if (kqswnal_tunables.kqn_sysctl == NULL)
		CWARN("Can't setup /proc tunables\n");

	return 0;
}
int kiblnd_tunables_init (void) { if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) { CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n", *kiblnd_tunables.kib_ib_mtu); return -EINVAL; } if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT) *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT; if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX) *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX; if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits) *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits; if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2) *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2; if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits) *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1; if (*kiblnd_tunables.kib_map_on_demand < 0 || *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS) *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */ if (*kiblnd_tunables.kib_map_on_demand == 1) *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */ if (*kiblnd_tunables.kib_concurrent_sends == 0) { if (*kiblnd_tunables.kib_map_on_demand > 0 && *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2; else *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits); } if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2) *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2; if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2) *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2; if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) { CWARN("Concurrent sends %d is lower than 
message queue size: %d, " "performance may drop slightly.\n", *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits); } kiblnd_sysctl_init(); return 0; }
/* Register the Portals LND sysctl table.  Failure to publish the
 * /proc tunables is non-fatal and only warned about. */
int
kptllnd_tunables_init ()
{
	kptllnd_tunables.kptl_sysctl =
		register_sysctl_table(kptllnd_top_ctl_table, 0);

	if (kptllnd_tunables.kptl_sysctl == NULL)
		CWARN("Can't setup /proc tunables\n");

	return 0;
}
/* Publish the o2iblnd tunables under /proc.  Registration failure is
 * non-fatal and only warned about. */
void
kiblnd_sysctl_init (void)
{
	kiblnd_initstrtunable(ipif_basename_space, ipif_name,
			      sizeof(ipif_basename_space));

	kiblnd_tunables.kib_sysctl =
		cfs_register_sysctl_table(kiblnd_top_ctl_table, 0);

	if (kiblnd_tunables.kib_sysctl == NULL)
		CWARN("Can't setup /proc tunables\n");
}
/* llog_process() callback: report each record seen in a plain llog.
 * Rejects logs that are not flagged LLOG_F_IS_PLAIN. */
static int plain_print_cb(struct llog_handle *llh, struct llog_rec_hdr *rec,
			  void *data)
{
	if (!(llh->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)) {
		CERROR("log is not plain\n");
		RETURN(-EINVAL);
	}

	CWARN("seeing record at index %d in log "LPX64"\n",
	      rec->lrh_index, llh->lgh_id.lgl_oid);
	RETURN(0);
}
/* Timer callback fired when a keyring upcall takes too long: expire
 * the security context and revoke its backing key. */
static void ctx_upcall_timeout_kr(unsigned long data)
{
	struct ptlrpc_cli_ctx *ctx = (struct ptlrpc_cli_ctx *)data;
	struct key *key = ctx2gctx_keyring(ctx)->gck_key;

	CWARN("ctx %p, key %p\n", ctx, key);
	LASSERT(key);

	cli_ctx_expire(ctx);
	key_revoke_locked(key);
}
/* Write one llog record to @file at offset @off via fsfilt_write_record().
 *
 * If @buf is NULL, @rec is written as a single complete record of
 * rec->lrh_len bytes.  If @buf is given, the record goes out in three
 * pieces — record header, payload, llog_rec_tail — and rec->lrh_len is
 * enlarged in place to cover all three.
 *
 * Returns 0 on success or a negative errno; file->f_pos is restored to
 * the saved position if the write ended before it. */
static int llog_lvfs_write_blob(struct obd_device *obd, struct l_file *file,
				struct llog_rec_hdr *rec, void *buf, loff_t off)
{
	int rc;
	struct llog_rec_tail end;
	loff_t saved_off = file->f_pos;
	int buflen = rec->lrh_len;

	ENTRY;
	file->f_pos = off;

	if (buflen == 0)
		CWARN("0-length record\n");

	if (!buf) {
		/* no separate payload: the record is written in one piece */
		rc = fsfilt_write_record(obd, file, rec, buflen,&file->f_pos,0);
		if (rc) {
			CERROR("error writing log record: rc %d\n", rc);
			goto out;
		}
		GOTO(out, rc = 0);
	}

	/* the buf case */
	/* grow the on-disk length to cover header + payload + tail */
	rec->lrh_len = sizeof(*rec) + buflen + sizeof(end);
	rc = fsfilt_write_record(obd, file, rec, sizeof(*rec), &file->f_pos, 0);
	if (rc) {
		CERROR("error writing log hdr: rc %d\n", rc);
		goto out;
	}

	rc = fsfilt_write_record(obd, file, buf, buflen, &file->f_pos, 0);
	if (rc) {
		CERROR("error writing log buffer: rc %d\n", rc);
		goto out;
	}

	/* the tail mirrors length and index so logs can be scanned backwards */
	end.lrt_len = rec->lrh_len;
	end.lrt_index = rec->lrh_index;
	rc = fsfilt_write_record(obd, file, &end, sizeof(end), &file->f_pos, 0);
	if (rc) {
		CERROR("error writing log tail: rc %d\n", rc);
		goto out;
	}

	rc = 0;
out:
	/* don't let a short write rewind f_pos behind its starting point */
	if (saved_off > file->f_pos)
		file->f_pos = saved_off;
	LASSERT(rc <= 0);
	RETURN(rc);
}
int kgnilnd_tunables_init() { int rc = 0; #if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM kgnilnd_tunables.kgn_sysctl = cfs_register_sysctl_table(kgnilnd_top_ctl_table, 0); if (kgnilnd_tunables.kgn_sysctl == NULL) CWARN("Can't setup /proc tunables\n"); #endif switch (*kgnilnd_tunables.kgn_checksum) { default: CERROR("Invalid checksum module parameter: %d\n", *kgnilnd_tunables.kgn_checksum); rc = -EINVAL; GOTO(out, rc); case GNILND_CHECKSUM_OFF: /* no checksumming */ break; case GNILND_CHECKSUM_SMSG_HEADER: LCONSOLE_INFO("SMSG header only checksumming enabled\n"); break; case GNILND_CHECKSUM_SMSG: LCONSOLE_INFO("SMSG checksumming enabled\n"); break; case GNILND_CHECKSUM_SMSG_BTE: LCONSOLE_INFO("SMSG + BTE checksumming enabled\n"); break; } if (*kgnilnd_tunables.kgn_max_immediate > GNILND_MAX_IMMEDIATE) { LCONSOLE_ERROR("kgnilnd module parameter 'max_immediate' too large %d > %d\n", *kgnilnd_tunables.kgn_max_immediate, GNILND_MAX_IMMEDIATE); rc = -EINVAL; GOTO(out, rc); } if (*kgnilnd_tunables.kgn_mbox_per_block < 1) { *kgnilnd_tunables.kgn_mbox_per_block = 1; } if (*kgnilnd_tunables.kgn_concurrent_sends == 0) { *kgnilnd_tunables.kgn_concurrent_sends = *kgnilnd_tunables.kgn_peer_credits; } else if (*kgnilnd_tunables.kgn_concurrent_sends > *kgnilnd_tunables.kgn_peer_credits) { LCONSOLE_ERROR("kgnilnd parameter 'concurrent_sends' too large: %d > %d (peer_credits)\n", *kgnilnd_tunables.kgn_concurrent_sends, *kgnilnd_tunables.kgn_peer_credits); rc = -EINVAL; } out: return rc; }
static int cfs_crypto_hash_alloc(unsigned char alg_id, const struct cfs_crypto_hash_type **type, struct hash_desc *desc, unsigned char *key, unsigned int key_len) { int err = 0; *type = cfs_crypto_hash_type(alg_id); if (*type == NULL) { CWARN("Unsupported hash algorithm id = %d, max id is %d\n", alg_id, CFS_HASH_ALG_MAX); return -EINVAL; } desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0); if (desc->tfm == NULL) return -EINVAL; if (IS_ERR(desc->tfm)) { CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n", (*type)->cht_name); return PTR_ERR(desc->tfm); } desc->flags = 0; /** Shash have different logic for initialization then digest * shash: crypto_hash_setkey, crypto_hash_init * digest: crypto_digest_init, crypto_digest_setkey * Skip this function for digest, because we use shash logic at * cfs_crypto_hash_alloc. */ if (key != NULL) { err = crypto_hash_setkey(desc->tfm, key, key_len); } else if ((*type)->cht_key != 0) { err = crypto_hash_setkey(desc->tfm, (unsigned char *)&((*type)->cht_key), (*type)->cht_size); } if (err != 0) { crypto_free_hash(desc->tfm); return err; } CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n", (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name, (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name, cfs_crypto_hash_speeds[alg_id]); return crypto_hash_init(desc); }
/* Test named-log create/open, close */ static int llog_test_1(const struct lu_env *env, struct obd_device *obd, char *name) { struct llog_handle *llh; struct llog_ctxt *ctxt; int rc; int rc2; ENTRY; CWARN("1a: create a log with name: %s\n", name); ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT); LASSERT(ctxt); rc = llog_open_create(env, ctxt, &llh, NULL, name); if (rc) { CERROR("1a: llog_create with name %s failed: %d\n", name, rc); GOTO(out, rc); } rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, &uuid); if (rc) { CERROR("1a: can't init llog handle: %d\n", rc); GOTO(out_close, rc); } rc = verify_handle("1", llh, 1); CWARN("1b: close newly-created log\n"); out_close: rc2 = llog_close(env, llh); if (rc2) { CERROR("1b: close log %s failed: %d\n", name, rc2); if (rc == 0) rc = rc2; } out: llog_ctxt_put(ctxt); RETURN(rc); }
int lprocfs_quota_wr_type(struct file *file, const char *buffer, unsigned long count, void *data) { struct obd_device *obd = (struct obd_device *)data; struct obd_device_target *obt; int type = 0, is_mds; unsigned long i; char stype[MAX_STYPE_SIZE + 1] = ""; LASSERT(obd != NULL); obt = &obd->u.obt; is_mds = !strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME); if (count > MAX_STYPE_SIZE) return -EINVAL; if (cfs_copy_from_user(stype, buffer, count)) return -EFAULT; for (i = 0 ; i < count ; i++) { switch (stype[i]) { case 'u' : type |= USER_QUOTA; break; case 'g' : type |= GROUP_QUOTA; break; case '1' : case '2' : CWARN("quota_type options 1 and 2 are obsolete, " "they will be ignored\n"); break; case '3' : /* the only valid version spec, do nothing */ default : /* just skip stray symbols like \n */ break; } } if (type != 0) { int rc = auto_quota_on(obd, type - 1); if (rc && rc != -EALREADY && rc != -ENOENT) return rc; } return count; }
/* Finish a client-side ping test instance and report how many pings
 * failed during the run. */
static void
ping_client_fini (sfw_test_instance_t *tsi)
{
	sfw_session_t *sn = tsi->tsi_batch->bat_session;
	int errors;

	LASSERT (sn != NULL);
	LASSERT (tsi->tsi_is_client);

	errors = atomic_read(&sn->sn_ping_errors);
	if (errors != 0)
		CWARN ("%d pings have failed.\n", errors);
	else
		CDEBUG (D_NET, "Ping test finished OK.\n");
}
/* llog_process() callback: validate and report each catalog record.
 * Rejects records whose type is not LLOG_LOGID_MAGIC. */
static int cat_print_cb(struct llog_handle *llh, struct llog_rec_hdr *rec,
			void *data)
{
	struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;

	if (rec->lrh_type != LLOG_LOGID_MAGIC) {
		CERROR("invalid record in catalog\n");
		RETURN(-EINVAL);
	}

	CWARN("seeing record at index %d - "LPX64":%x in log "LPX64"\n",
	      rec->lrh_index, lir->lid_id.lgl_oid, lir->lid_id.lgl_ogen,
	      llh->lgh_id.lgl_oid);
	RETURN(0);
}