static int gss_proc_read_oos(char *page, char **start, off_t off, int count,
                             int *eof, void *data)
{
        int written;

        written = snprintf(page, count,
                           "seqwin: %u\n"
                           "backwin: %u\n"
                           "client fall behind seqwin\n"
                           " occurrence: %d\n"
                           " max seq behind: %d\n"
                           "server replay detected:\n"
                           " phase 0: %d\n"
                           " phase 1: %d\n"
                           " phase 2: %d\n"
                           "server verify ok:\n"
                           " phase 2: %d\n",
                           GSS_SEQ_WIN_MAIN,
                           GSS_SEQ_WIN_BACK,
                           cfs_atomic_read(&gss_stat_oos.oos_cli_count),
                           gss_stat_oos.oos_cli_behind,
                           cfs_atomic_read(&gss_stat_oos.oos_svc_replay[0]),
                           cfs_atomic_read(&gss_stat_oos.oos_svc_replay[1]),
                           cfs_atomic_read(&gss_stat_oos.oos_svc_replay[2]),
                           cfs_atomic_read(&gss_stat_oos.oos_svc_pass[2]));

        return written;
}
static void exit_libcfs_module(void)
{
        int rc;

        remove_proc();

        CDEBUG(D_MALLOC, "before Portals cleanup: kmem %d\n",
               cfs_atomic_read(&libcfs_kmemory));

        cfs_wi_shutdown();
        rc = cfs_psdev_deregister(&libcfs_dev);
        if (rc)
                CERROR("misc_deregister error %d\n", rc);

#if LWT_SUPPORT
        lwt_fini();
#endif

        if (cfs_atomic_read(&libcfs_kmemory) != 0)
                CERROR("Portals memory leaked: %d bytes\n",
                       cfs_atomic_read(&libcfs_kmemory));

        rc = libcfs_debug_cleanup();
        if (rc)
                printk(CFS_KERN_ERR "LustreError: libcfs_debug_cleanup: %d\n",
                       rc);

        cfs_fini_rwsem(&ioctl_list_sem);
        cfs_fini_rwsem(&cfs_tracefile_sem);

        libcfs_arch_cleanup();
}
static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
                        const struct cl_object_conf *conf)
{
        struct lov_stripe_md *lsm = NULL;
        struct lov_object    *lov = cl2lov(obj);
        int                   result = 0;
        ENTRY;

        lov_conf_lock(lov);
        if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
                lov->lo_layout_invalid = true;
                GOTO(out, result = 0);
        }

        if (conf->coc_opc == OBJECT_CONF_WAIT) {
                if (lov->lo_layout_invalid &&
                    cfs_atomic_read(&lov->lo_active_ios) > 0) {
                        lov_conf_unlock(lov);
                        result = lov_layout_wait(env, lov);
                        lov_conf_lock(lov);
                }
                GOTO(out, result);
        }

        LASSERT(conf->coc_opc == OBJECT_CONF_SET);

        if (conf->u.coc_md != NULL)
                lsm = conf->u.coc_md->lsm;
        if ((lsm == NULL && lov->lo_lsm == NULL) ||
            ((lsm != NULL && lov->lo_lsm != NULL) &&
             (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
             (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) {
                /* same version of layout */
                lov->lo_layout_invalid = false;
                GOTO(out, result = 0);
        }

        /* will change layout - check if there still exists active IO. */
        if (cfs_atomic_read(&lov->lo_active_ios) > 0) {
                lov->lo_layout_invalid = true;
                GOTO(out, result = -EBUSY);
        }

        lov->lo_layout_invalid = lov_layout_change(env, lov, conf);
        EXIT;

out:
        lov_conf_unlock(lov);
        CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
               PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
        RETURN(result);
}
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
{
        struct l_wait_info lwi = { 0 };
        ENTRY;

        while (cfs_atomic_read(&lov->lo_active_ios) > 0) {
                CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
                       PFID(lu_object_fid(lov2lu(lov))),
                       cfs_atomic_read(&lov->lo_active_ios));

                l_wait_event(lov->lo_waitq,
                             cfs_atomic_read(&lov->lo_active_ios) == 0, &lwi);
        }
        RETURN(0);
}
int ptlrpc_connection_put(struct ptlrpc_connection *conn)
{
        int rc = 0;
        ENTRY;

        if (!conn)
                RETURN(rc);

        LASSERT(!cfs_hlist_unhashed(&conn->c_hash));

        /*
         * We do not remove the connection from the hashtable and do not
         * free it even if the last caller released its ref, as we want
         * to have it cached for the case it is needed again.
         *
         * Deallocating it and later creating a new connection again
         * would be wasteful. This way we also avoid expensive locking
         * to protect things from a get/put race when a found cached
         * connection is freed by ptlrpc_connection_put().
         *
         * It will be freed later, at module unload time, when the
         * ptlrpc_connection_fini()->lh_exit->conn_exit() path is called.
         */
        if (cfs_atomic_dec_return(&conn->c_refcount) == 1)
                rc = 1;

        CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n",
               conn, cfs_atomic_read(&conn->c_refcount),
               libcfs_nid2str(conn->c_peer.nid));

        RETURN(rc);
}
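/*
 * Illustrative sketch, not Lustre code: the "cache keeps one reference"
 * pattern used by ptlrpc_connection_put() above. The hash table itself
 * holds the final reference, so a put that brings the count down to 1
 * means the last *user* is gone while the object stays cached until
 * module unload. The names cached_conn and cached_conn_put are
 * hypothetical; this is a minimal userspace rendering of the idea.
 */
#include <stdatomic.h>
#include <stdio.h>

struct cached_conn {
        atomic_int refcount;            /* 1 == held only by the cache */
};

/* Returns 1 when the last user reference is dropped; the object is not
 * freed, since the cache still holds its own reference. */
static int cached_conn_put(struct cached_conn *c)
{
        /* atomic_fetch_sub() returns the value *before* the decrement,
         * so "== 2" here matches "dec_return == 1" in the original. */
        return atomic_fetch_sub(&c->refcount, 1) == 2;
}

int main(void)
{
        struct cached_conn c = { .refcount = 2 };       /* cache + 1 user */

        printf("last user gone: %d\n", cached_conn_put(&c));    /* 1 */
        return 0;
}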
static int osc_rd_destroys_in_flight(char *page, char **start, off_t off,
                                     int count, int *eof, void *data)
{
        struct obd_device *obd = data;

        return snprintf(page, count, "%u\n",
                        cfs_atomic_read(&obd->u.cli.cl_destroy_in_flight));
}
static void conn_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct ptlrpc_connection *conn;

        conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
        /*
         * Nothing should be left. The connection user put it, and the
         * connection was also deleted from the table by this time, so
         * we should have 0 refs.
         */
        LASSERTF(cfs_atomic_read(&conn->c_refcount) == 0,
                 "Busy connection with %d refs\n",
                 cfs_atomic_read(&conn->c_refcount));
        OBD_FREE_PTR(conn);
}
/* push / pop to root of obd store */
void push_ctxt(struct lvfs_run_ctxt *save, struct lvfs_run_ctxt *new_ctx,
               struct lvfs_ucred *uc)
{
        /* ASSERT_NOT_KERNEL_CTXT("already in kernel context!\n"); */
        ASSERT_CTXT_MAGIC(new_ctx->magic);
        OBD_SET_CTXT_MAGIC(save);

        save->fs = get_fs();
        LASSERT(cfs_atomic_read(&cfs_fs_pwd(current->fs)->d_count));
        LASSERT(cfs_atomic_read(&new_ctx->pwd->d_count));
        save->pwd = dget(cfs_fs_pwd(current->fs));
        save->pwdmnt = mntget(cfs_fs_mnt(current->fs));
        save->luc.luc_umask = cfs_curproc_umask();
        save->ngroups = current_cred()->group_info->ngroups;

        LASSERT(save->pwd);
        LASSERT(save->pwdmnt);
        LASSERT(new_ctx->pwd);
        LASSERT(new_ctx->pwdmnt);

        if (uc) {
                struct cred *cred;

                save->luc.luc_uid = current_uid();
                save->luc.luc_gid = current_gid();
                save->luc.luc_fsuid = current_fsuid();
                save->luc.luc_fsgid = current_fsgid();
                save->luc.luc_cap = current_cap();

                if ((cred = prepare_creds())) {
                        cred->uid = uc->luc_uid;
                        cred->gid = uc->luc_gid;
                        cred->fsuid = uc->luc_fsuid;
                        cred->fsgid = uc->luc_fsgid;
                        cred->cap_effective = uc->luc_cap;
                        commit_creds(cred);
                }

                push_group_info(save,
                                uc->luc_ginfo ?:
                                uc->luc_identity ? uc->luc_identity->mi_ginfo :
                                                   NULL);
        }
        current->fs->umask = 0; /* umask already applied on client */
        set_fs(new_ctx->fs);
        ll_set_fs_pwd(current->fs, new_ctx->pwdmnt, new_ctx->pwd);
}
static int lov_layout_change(const struct lu_env *unused,
                             struct lov_object *lov,
                             const struct cl_object_conf *conf)
{
        int result;
        enum lov_layout_type llt = LLT_EMPTY;
        union lov_layout_state *state = &lov->u;
        const struct lov_layout_operations *old_ops;
        const struct lov_layout_operations *new_ops;
        struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
        void *cookie;
        struct lu_env *env;
        int refcheck;
        ENTRY;

        LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));

        if (conf->u.coc_md != NULL)
                llt = lov_type(conf->u.coc_md->lsm);
        LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));

        cookie = cl_env_reenter();
        env = cl_env_get(&refcheck);
        if (IS_ERR(env)) {
                cl_env_reexit(cookie);
                RETURN(PTR_ERR(env));
        }

        old_ops = &lov_dispatch[lov->lo_type];
        new_ops = &lov_dispatch[llt];

        result = old_ops->llo_delete(env, lov, &lov->u);
        if (result == 0) {
                old_ops->llo_fini(env, lov, &lov->u);
                LASSERT(cfs_atomic_read(&lov->lo_active_ios) == 0);
                LASSERT(hdr->coh_tree.rnode == NULL);
                LASSERT(hdr->coh_pages == 0);

                lov->lo_type = LLT_EMPTY;
                result = new_ops->llo_init(env,
                                           lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
                                           lov, conf, state);
                if (result == 0) {
                        new_ops->llo_install(env, lov, state);
                        lov->lo_type = llt;
                } else {
                        new_ops->llo_delete(env, lov, state);
                        new_ops->llo_fini(env, lov, state);
                        /* this file becomes an EMPTY file. */
                }
        }

        cl_env_put(env, &refcheck);
        cl_env_reexit(cookie);
        RETURN(result);
}
void lov_lsm_decref(struct lov_object *lov, struct lov_stripe_md *lsm)
{
        if (lsm == NULL)
                return;

        CDEBUG(D_INODE, "lsm %p decref %d by %p.\n",
               lsm, cfs_atomic_read(&lsm->lsm_refc), current);

        lov_free_memmd(&lsm);
}
static inline int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
                                            cfs_hlist_head_t *freelist)
{
        LASSERT(ctx->cc_sec);
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));

        return ctx_check_death_pf(ctx, freelist);
}
__s64 lprocfs_read_helper(struct lprocfs_counter *lc,
                          enum lprocfs_fields_flags field)
{
        __s64 ret = 0;
        int centry;

        if (!lc)
                RETURN(0);

        do {
                centry = cfs_atomic_read(&lc->lc_cntl.la_entry);

                switch (field) {
                case LPROCFS_FIELDS_FLAGS_CONFIG:
                        ret = lc->lc_config;
                        break;
                case LPROCFS_FIELDS_FLAGS_SUM:
                        ret = lc->lc_sum + lc->lc_sum_irq;
                        break;
                case LPROCFS_FIELDS_FLAGS_MIN:
                        ret = lc->lc_min;
                        break;
                case LPROCFS_FIELDS_FLAGS_MAX:
                        ret = lc->lc_max;
                        break;
                case LPROCFS_FIELDS_FLAGS_AVG:
                        ret = (lc->lc_max - lc->lc_min) / 2;
                        break;
                case LPROCFS_FIELDS_FLAGS_SUMSQUARE:
                        ret = lc->lc_sumsquare;
                        break;
                case LPROCFS_FIELDS_FLAGS_COUNT:
                        ret = lc->lc_count;
                        break;
                default:
                        break;
                }
        } while (centry != cfs_atomic_read(&lc->lc_cntl.la_entry) &&
                 centry != cfs_atomic_read(&lc->lc_cntl.la_exit));

        RETURN(ret);
}
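/*
 * Illustrative sketch, not the Lustre implementation: a simplified
 * userspace rendering of the la_entry/la_exit retry protocol the reader
 * above relies on. The assumed writer side bumps "entry" before
 * touching the counter fields and brings "exit" up to match afterwards,
 * so a reader that sees "entry" change during its snapshot, or
 * entry != exit, knows it raced a writer and retries. The retry
 * condition here is a simplified variant of the one in
 * lprocfs_read_helper(); all names are hypothetical.
 */
#include <stdatomic.h>

struct seq_counter {
        atomic_int entry;
        atomic_int exit;
        long long  sum;
};

static void writer_add(struct seq_counter *c, long long v)
{
        atomic_fetch_add(&c->entry, 1);         /* mark update in progress */
        c->sum += v;                            /* non-atomic field update */
        atomic_store(&c->exit, atomic_load(&c->entry)); /* mark update done */
}

static long long reader_sum(struct seq_counter *c)
{
        long long ret;
        int start;

        do {
                start = atomic_load(&c->entry);
                ret = c->sum;
                /* retry if a writer entered meanwhile or is still inside */
        } while (start != atomic_load(&c->entry) ||
                 start != atomic_load(&c->exit));
        return ret;
}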
static void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
{
        LASSERT(ctx->cc_sec);
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);

        cli_ctx_expire(ctx);

        spin_lock(&ctx->cc_sec->ps_lock);

        if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
                LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);

                cfs_hlist_del_init(&ctx->cc_cache);
                if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
                        LBUG();
        }

        spin_unlock(&ctx->cc_sec->ps_lock);
}
struct ptlrpc_connection *
ptlrpc_connection_addref(struct ptlrpc_connection *conn)
{
        ENTRY;

        cfs_atomic_inc(&conn->c_refcount);
        CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
               conn, cfs_atomic_read(&conn->c_refcount),
               libcfs_nid2str(conn->c_peer.nid));

        RETURN(conn);
}
int lov_free_memmd(struct lov_stripe_md **lsmp)
{
        struct lov_stripe_md *lsm = *lsmp;
        int refc;

        *lsmp = NULL;
        LASSERT(cfs_atomic_read(&lsm->lsm_refc) > 0);
        if ((refc = cfs_atomic_dec_return(&lsm->lsm_refc)) == 0) {
                LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
                lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
        }
        return refc;
}
static int lov_print_released(const struct lu_env *env, void *cookie,
                              lu_printer_t p, const struct lu_object *o)
{
        struct lov_object    *lov = lu2lov(o);
        struct lov_stripe_md *lsm = lov->lo_lsm;

        (*p)(env, cookie,
             "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
             lov->lo_layout_invalid ? "invalid" : "valid", lsm,
             lsm->lsm_magic, cfs_atomic_read(&lsm->lsm_refc),
             lsm->lsm_stripe_count, lsm->lsm_layout_gen);
        return 0;
}
/*
 * __free_page
 *   To free the struct page including the page
 *
 * Arguments:
 *   pg: pointer to the struct page structure
 *
 * Return Value:
 *   N/A
 *
 * Notes:
 *   N/A
 */
void __free_page(struct page *pg)
{
        ASSERT(pg != NULL);
        ASSERT(pg->addr != NULL);
        ASSERT(cfs_atomic_read(&pg->count) <= 1);

        if (!test_bit(PG_virt, &pg->flags)) {
                kmem_cache_free(cfs_page_p_slab, pg->addr);
                cfs_atomic_dec(&libcfs_total_pages);
        } else {
                cfs_enter_debugger();
        }

        kmem_cache_free(cfs_page_t_slab, pg);
}
struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
{
        struct lov_stripe_md *lsm = NULL;

        lov_conf_freeze(lov);
        if (lov->lo_lsm != NULL) {
                lsm = lsm_addref(lov->lo_lsm);
                CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
                       lsm, cfs_atomic_read(&lsm->lsm_refc),
                       lov->lo_layout_invalid, current);
        }
        lov_conf_thaw(lov);
        return lsm;
}
static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v)
{
        struct obd_device *dev = seq->private;
        struct client_obd *cli = &dev->u.cli;
        struct ptlrpc_sec *sec = NULL;
        char               str[32];

        LASSERT(strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) == 0 ||
                strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
                strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) == 0);

        if (cli->cl_import)
                sec = sptlrpc_import_sec_ref(cli->cl_import);
        if (sec == NULL)
                goto out;

        sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str));

        seq_printf(seq, "rpc flavor: %s\n",
                   sptlrpc_flavor2name_base(sec->ps_flvr.sf_rpc));
        seq_printf(seq, "bulk flavor: %s\n",
                   sptlrpc_flavor2name_bulk(&sec->ps_flvr, str, sizeof(str)));
        seq_printf(seq, "flags: %s\n",
                   sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)));
        seq_printf(seq, "id: %d\n", sec->ps_id);
        seq_printf(seq, "refcount: %d\n",
                   cfs_atomic_read(&sec->ps_refcount));
        seq_printf(seq, "nctx: %d\n", cfs_atomic_read(&sec->ps_nctx));
        seq_printf(seq, "gc interval %ld\n", sec->ps_gc_interval);
        seq_printf(seq, "gc next %ld\n", sec->ps_gc_interval ?
                   sec->ps_gc_next - cfs_time_current_sec() : 0);

        sptlrpc_sec_put(sec);
out:
        return 0;
}
static void ctx_list_destroy_pf(cfs_hlist_head_t *head)
{
        struct ptlrpc_cli_ctx *ctx;

        while (!cfs_hlist_empty(head)) {
                ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
                                      cc_cache);

                LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
                LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
                                 &ctx->cc_flags) == 0);

                cfs_hlist_del_init(&ctx->cc_cache);
                ctx_destroy_pf(ctx->cc_sec, ctx);
        }
}
/*
 * Check whether file has possible unwritten pages.
 *
 * \retval 1    file is mmap-ed or has dirty pages
 * \retval 0    otherwise
 */
blkcnt_t dirty_cnt(struct inode *inode)
{
        blkcnt_t cnt = 0;
#ifdef __KERNEL__
        struct ccc_object *vob = cl_inode2ccc(inode);
        void              *results[1];

        if (inode->i_mapping != NULL)
                cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->page_tree,
                                                  results, 0, 1,
                                                  PAGECACHE_TAG_DIRTY);
        if (cnt == 0 && cfs_atomic_read(&vob->cob_mmap_cnt) > 0)
                cnt = 1;
#endif
        return (cnt > 0) ? 1 : 0;
}
/*
 * caller must hold spinlock
 */
static void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx,
                          cfs_hlist_head_t *freelist)
{
        LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
        LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));

        clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);

        if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
                __cfs_hlist_del(&ctx->cc_cache);
                cfs_hlist_add_head(&ctx->cc_cache, freelist);
        } else {
                cfs_hlist_del_init(&ctx->cc_cache);
        }
}
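/*
 * Illustrative sketch, not Lustre code: the defer-destroy pattern shared
 * by ctx_unhash_pf() above and ctx_list_destroy_pf() earlier. An object
 * whose refcount hits zero while the spinlock is held is only *moved* to
 * a caller-private freelist, and actually destroyed after the lock is
 * dropped, keeping destructor work out of the critical section. The
 * names unhash_locked and destroy_list are hypothetical.
 */
#include <stdlib.h>

struct node {
        struct node *next;
};

/* Called with the container lock held: unlink the head node of a bucket
 * and park it on the caller's private freelist instead of freeing it. */
static void unhash_locked(struct node **bucket, struct node **freelist)
{
        struct node *n = *bucket;

        *bucket = n->next;              /* remove from the container */
        n->next = *freelist;            /* defer destruction */
        *freelist = n;
}

/* Called after the lock is dropped: destructor work happens unlocked. */
static void destroy_list(struct node **freelist)
{
        while (*freelist != NULL) {
                struct node *n = *freelist;

                *freelist = n->next;
                free(n);                /* safe: no spinlock held here */
        }
}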
int obd_alloc_fail(const void *ptr, const char *name, const char *type,
                   size_t size, const char *file, int line)
{
        if (ptr == NULL ||
            (cfs_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
                CERROR("%s%salloc of %s ("LPU64" bytes) failed at %s:%d\n",
                       ptr ? "force " : "", type, name, (__u64)size, file,
                       line);
                CERROR(LPU64" total bytes and "LPU64" total pages "
                       "("LPU64" bytes) allocated by Lustre, "
                       "%d total bytes by LNET\n",
                       obd_memory_sum(),
                       obd_pages_sum() << CFS_PAGE_SHIFT,
                       obd_pages_sum(),
                       cfs_atomic_read(&libcfs_kmemory));
                return 1;
        }
        return 0;
}
static int vvp_object_print(const struct lu_env *env, void *cookie,
                            lu_printer_t p, const struct lu_object *o)
{
        struct ccc_object    *obj   = lu2ccc(o);
        struct inode         *inode = obj->cob_inode;
        struct ll_inode_info *lli;

        (*p)(env, cookie, "(%s %d %d) inode: %p ",
             cfs_list_empty(&obj->cob_pending_list) ? "-" : "+",
             obj->cob_transient_pages,
             cfs_atomic_read(&obj->cob_mmap_cnt),
             inode);
        if (inode) {
                lli = ll_i2info(inode);
                (*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
                     inode->i_ino, inode->i_generation, inode->i_mode,
                     inode->i_nlink, atomic_read(&inode->i_count),
                     lli->lli_clob, PFID(&lli->lli_fid));
        }
        return 0;
}
struct ptlrpc_connection *
ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
                      struct obd_uuid *uuid)
{
        struct ptlrpc_connection *conn, *conn2;
        ENTRY;

        conn = cfs_hash_lookup(conn_hash, &peer);
        if (conn)
                GOTO(out, conn);

        OBD_ALLOC_PTR(conn);
        if (!conn)
                RETURN(NULL);

        conn->c_peer = peer;
        conn->c_self = self;
        CFS_INIT_HLIST_NODE(&conn->c_hash);
        cfs_atomic_set(&conn->c_refcount, 1);
        if (uuid)
                obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);

        /*
         * Add the newly created conn to the hash; on key collision we
         * lost a racing addition and must destroy our newly allocated
         * connection. The object which exists in the hash will be
         * returned and may be compared against our object.
         */
        conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
        if (conn != conn2) {
                OBD_FREE_PTR(conn);
                conn = conn2;
        }
        EXIT;
out:
        CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
               conn, cfs_atomic_read(&conn->c_refcount),
               libcfs_nid2str(conn->c_peer.nid));
        return conn;
}
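/*
 * Illustrative sketch, not Lustre code: the allocate-then-publish race
 * resolution used by ptlrpc_connection_get() above. Allocate
 * optimistically, try to publish with a single atomic operation, and if
 * another thread won the race, free our copy and adopt the winner's;
 * cfs_hash_findadd_unique() plays the compare-and-swap role in the real
 * code. A single global slot stands in for the hash table here, and the
 * names conn_get/published are hypothetical.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct conn { int peer; };

static _Atomic(struct conn *) published;        /* stand-in for the hash */

static struct conn *conn_get(int peer)
{
        struct conn *existing = atomic_load(&published);
        struct conn *mine;

        if (existing)
                return existing;                /* fast path: cached */

        mine = malloc(sizeof(*mine));
        if (mine == NULL)
                return NULL;
        mine->peer = peer;

        existing = NULL;
        if (!atomic_compare_exchange_strong(&published, &existing, mine)) {
                free(mine);                     /* lost the race */
                return existing;                /* adopt the winner's */
        }
        return mine;
}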
static int lov_print_raid0(const struct lu_env *env, void *cookie,
                           lu_printer_t p, const struct lu_object *o)
{
        struct lov_object       *lov = lu2lov(o);
        struct lov_layout_raid0 *r0  = lov_r0(lov);
        struct lov_stripe_md    *lsm = lov->lo_lsm;
        int                      i;

        (*p)(env, cookie, "stripes: %d, %svalid, lsm{%p 0x%08X %d %u %u}: \n",
             r0->lo_nr, lov->lo_layout_invalid ? "in" : "", lsm,
             lsm->lsm_magic, cfs_atomic_read(&lsm->lsm_refc),
             lsm->lsm_stripe_count, lsm->lsm_layout_gen);
        for (i = 0; i < r0->lo_nr; ++i) {
                struct lu_object *sub;

                if (r0->lo_sub[i] != NULL) {
                        sub = lovsub2lu(r0->lo_sub[i]);
                        lu_object_print(env, cookie, p, sub);
                } else {
                        (*p)(env, cookie, "sub %d absent\n", i);
                }
        }
        return 0;
}
void llog_handle_put(struct llog_handle *loghandle)
{
        LASSERT(cfs_atomic_read(&loghandle->lgh_refcount) > 0);
        if (cfs_atomic_dec_and_test(&loghandle->lgh_refcount))
                llog_free_handle(loghandle);
}
int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
{
        struct ptlrpc_cli_ctx   *ctx = &gctx->gc_base;
        struct obd_import       *imp = ctx->cc_sec->ps_import;
        struct ptlrpc_request   *req;
        struct ptlrpc_user_desc *pud;
        int                      rc;
        ENTRY;

        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);

        if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
                CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
                       "don't send destroy rpc\n", ctx,
                       ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
                RETURN(0);
        }

        cfs_might_sleep();

        CWARN("%s ctx %p idx "LPX64" (%u->%s)\n",
              sec_is_reverse(ctx->cc_sec) ?
              "server finishing reverse" : "client finishing forward",
              ctx, gss_handle_to_u64(&gctx->gc_handle),
              ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));

        gctx->gc_proc = PTLRPC_GSS_PROC_DESTROY;

        req = ptlrpc_request_alloc(imp, &RQF_SEC_CTX);
        if (req == NULL) {
                CWARN("ctx %p(%u): fail to prepare rpc, destroy locally\n",
                      ctx, ctx->cc_vcred.vc_uid);
                GOTO(out, rc = -ENOMEM);
        }

        rc = ptlrpc_request_bufs_pack(req, LUSTRE_OBD_VERSION, SEC_CTX_FINI,
                                      NULL, ctx);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out_ref, rc);
        }

        /* fix the user desc */
        if (req->rq_pack_udesc) {
                /* we rely on the fact that this request is in AUTH mode,
                 * and the user_desc is at offset 2. */
                pud = lustre_msg_buf(req->rq_reqbuf, 2, sizeof(*pud));
                LASSERT(pud);
                pud->pud_uid = pud->pud_fsuid = ctx->cc_vcred.vc_uid;
                pud->pud_gid = pud->pud_fsgid = ctx->cc_vcred.vc_gid;
                pud->pud_cap = 0;
                pud->pud_ngroups = 0;
        }

        req->rq_phase = RQ_PHASE_RPC;
        rc = ptl_send_rpc(req, 1);
        if (rc)
                CWARN("ctx %p(%u->%s): rpc error %d, destroy locally\n", ctx,
                      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec), rc);

out_ref:
        ptlrpc_req_finished(req);
out:
        RETURN(rc);
}
/**
 * Send request \a request.
 * If \a noreply is set, don't expect any reply back and don't set up
 * reply buffers.
 * Returns 0 on success or error code.
 */
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
        int rc;
        int rc2;
        int mpflag = 0;
        struct ptlrpc_connection *connection;
        lnet_handle_me_t reply_me_h;
        lnet_md_t reply_md;
        struct obd_device *obd = request->rq_import->imp_obd;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
                RETURN(0);

        LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
        LASSERT(request->rq_wait_ctx == 0);

        /* If this is a re-transmit, we're required to have disengaged
         * cleanly from the previous attempt */
        LASSERT(!request->rq_receiving_reply);

        if (request->rq_import->imp_obd &&
            request->rq_import->imp_obd->obd_fail) {
                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
                       request->rq_import->imp_obd->obd_name);
                /* this prevents us from waiting in ptlrpc_queue_wait */
                request->rq_err = 1;
                request->rq_status = -ENODEV;
                RETURN(-ENODEV);
        }

        connection = request->rq_import->imp_connection;

        lustre_msg_set_handle(request->rq_reqmsg,
                              &request->rq_import->imp_remote_handle);
        lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
        lustre_msg_set_conn_cnt(request->rq_reqmsg,
                                request->rq_import->imp_conn_cnt);
        lustre_msghdr_set_flags(request->rq_reqmsg,
                                request->rq_import->imp_msghdr_flags);

        if (request->rq_resend)
                lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);

        if (request->rq_memalloc)
                mpflag = cfs_memory_pressure_get_and_set();

        rc = sptlrpc_cli_wrap_request(request);
        if (rc)
                GOTO(out, rc);

        /* bulk register should be done after wrap_request() */
        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk(request);
                if (rc != 0)
                        GOTO(out, rc);
        }

        if (!noreply) {
                LASSERT(request->rq_replen != 0);
                if (request->rq_repbuf == NULL) {
                        LASSERT(request->rq_repdata == NULL);
                        LASSERT(request->rq_repmsg == NULL);
                        rc = sptlrpc_cli_alloc_repbuf(request,
                                                      request->rq_replen);
                        if (rc) {
                                /* this prevents us from looping in
                                 * ptlrpc_queue_wait */
                                request->rq_err = 1;
                                request->rq_status = rc;
                                GOTO(cleanup_bulk, rc);
                        }
                } else {
                        request->rq_repdata = NULL;
                        request->rq_repmsg = NULL;
                }

                rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
                                  connection->c_peer, request->rq_xid, 0,
                                  LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
                if (rc != 0) {
                        CERROR("LNetMEAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        GOTO(cleanup_bulk, rc = -ENOMEM);
                }
        }

        spin_lock(&request->rq_lock);
        /* If the MD attach succeeds, there _will_ be a reply_in callback */
        request->rq_receiving_reply = !noreply;
        /* We are responsible for unlinking the reply buffer */
        request->rq_must_unlink = !noreply;
        /* Clear any flags that may be present from previous sends. */
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
        request->rq_reply_truncate = 0;
        spin_unlock(&request->rq_lock);

        if (!noreply) {
                reply_md.start     = request->rq_repbuf;
                reply_md.length    = request->rq_repbuf_len;
                /* Allow multiple early replies */
                reply_md.threshold = LNET_MD_THRESH_INF;
                /* Manage remote for early replies */
                reply_md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
                                     LNET_MD_MANAGE_REMOTE |
                                     LNET_MD_TRUNCATE; /* allow to make
                                                        * EOVERFLOW error */
                reply_md.user_ptr  = &request->rq_reply_cbid;
                reply_md.eq_handle = ptlrpc_eq_h;

                /* We must see the unlink callback to unset rq_must_unlink,
                 * so we can't auto-unlink */
                rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
                                  &request->rq_reply_md_h);
                if (rc != 0) {
                        CERROR("LNetMDAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        spin_lock(&request->rq_lock);
                        /* ...but the MD attach didn't succeed... */
                        request->rq_receiving_reply = 0;
                        spin_unlock(&request->rq_lock);
                        GOTO(cleanup_me, rc = -ENOMEM);
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                       ", portal %u\n",
                       request->rq_repbuf_len, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* add references on request for request_out_callback */
        ptlrpc_request_addref(request);
        if (obd->obd_svc_stats != NULL)
                lprocfs_counter_add(obd->obd_svc_stats,
                                    PTLRPC_REQACTIVE_CNTR,
                                    cfs_atomic_read(&request->rq_import->imp_inflight));

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

        cfs_gettimeofday(&request->rq_arrival_time);
        request->rq_sent = cfs_time_current_sec();
        /* We give the server rq_timeout secs to process the req, and
         * add the network latency for our local timeout. */
        request->rq_deadline = request->rq_sent + request->rq_timeout +
                               ptlrpc_at_get_net_latency(request);

        ptlrpc_pinger_sending_on_import(request->rq_import);

        DEBUG_REQ(D_INFO, request, "send flg=%x",
                  lustre_msg_get_flags(request->rq_reqmsg));
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqbuf, request->rq_reqdata_len,
                          LNET_NOACK_REQ, &request->rq_req_cbid,
                          connection,
                          request->rq_request_portal,
                          request->rq_xid, 0);
        if (rc == 0)
                GOTO(out, rc);

        ptlrpc_req_finished(request);
        if (noreply)
                GOTO(out, rc);

cleanup_me:
        /* MEUnlink is safe; the PUT didn't even get off the ground, and
         * nobody apart from the PUT's target has the right nid+XID to
         * access the reply buffer. */
        rc2 = LNetMEUnlink(reply_me_h);
        LASSERT(rc2 == 0);
        /* UNLINKED callback called synchronously */
        LASSERT(!request->rq_receiving_reply);

cleanup_bulk:
        /* We do sync unlink here as there was no real transfer here so
         * the chance to have long unlink to sluggish net is smaller here. */
        ptlrpc_unregister_bulk(request, 0);
out:
        if (request->rq_memalloc)
                cfs_memory_pressure_restore(mpflag);
        return rc;
}
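/*
 * Illustrative sketch, not Lustre code: ptl_send_rpc() above uses the
 * classic "cleanup ladder" idiom; each failure jumps to the label that
 * unwinds exactly the state set up so far (cleanup_me unlinks the reply
 * ME, cleanup_bulk unregisters bulk). A minimal generic rendering, with
 * the hypothetical helper setup_two_resources():
 */
#include <stdlib.h>

/* acquire two resources or unwind only what actually succeeded */
static int setup_two_resources(void **a, void **b)
{
        int rc = -1;

        *a = malloc(16);
        if (*a == NULL)
                goto out;               /* nothing to undo yet */

        *b = malloc(16);
        if (*b == NULL)
                goto cleanup_a;         /* undo only the first step */

        return 0;

cleanup_a:
        free(*a);
        *a = NULL;
out:
        return rc;
}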
/*
 * Release the qsd_qtype_info structure which contains data associated
 * with a given quota type. This releases the accounting objects.
 * It's called on OSD cleanup when the qsd instance is released.
 *
 * \param env   - is the environment passed by the caller
 * \param qsd   - is the qsd instance managing the qsd_qtype_info structure
 *                to be released
 * \param qtype - is the quota type to be shutdown
 */
static void qsd_qtype_fini(const struct lu_env *env, struct qsd_instance *qsd,
                           int qtype)
{
        struct qsd_qtype_info *qqi;
        int repeat = 0;
        ENTRY;

        if (qsd->qsd_type_array[qtype] == NULL)
                RETURN_EXIT;

        qqi = qsd->qsd_type_array[qtype];
        qsd->qsd_type_array[qtype] = NULL;

        /* all deferred work lists should be empty */
        LASSERT(cfs_list_empty(&qqi->qqi_deferred_glb));
        LASSERT(cfs_list_empty(&qqi->qqi_deferred_slv));

        /* shutdown lquota site */
        if (qqi->qqi_site != NULL && !IS_ERR(qqi->qqi_site)) {
                lquota_site_free(env, qqi->qqi_site);
                qqi->qqi_site = NULL;
        }

        /* The qqi may still be held by global locks which are being
         * canceled asynchronously (LU-4365), see the following steps:
         *
         * - On server umount, we try to clear all quota locks first by
         *   disconnecting LWP (which will invalidate the import and clean
         *   up all locks on it); however, if the quota reint process is
         *   holding the global lock for reintegration at that time, the
         *   global lock will fail to be cleared on LWP disconnection.
         *
         * - The umount process goes on and stops the reint process; the
         *   global lock will be dropped on reint process exit. However,
         *   the lock cancellation is done asynchronously, so
         *   qsd_glb_blocking_ast() might not have been called yet when
         *   we get here. */
        while (cfs_atomic_read(&qqi->qqi_ref) > 1) {
                CDEBUG(D_QUOTA, "qqi reference count %u, repeat: %d\n",
                       cfs_atomic_read(&qqi->qqi_ref), repeat);
                repeat++;
                schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
                                               cfs_time_seconds(1));
        }

        /* by now, all qqi users should have gone away */
        LASSERT(cfs_atomic_read(&qqi->qqi_ref) == 1);
        lu_ref_fini(&qqi->qqi_reference);

        /* release accounting object */
        if (qqi->qqi_acct_obj != NULL && !IS_ERR(qqi->qqi_acct_obj)) {
                lu_object_put(env, &qqi->qqi_acct_obj->do_lu);
                qqi->qqi_acct_obj = NULL;
        }

        /* release slv index */
        if (qqi->qqi_slv_obj != NULL && !IS_ERR(qqi->qqi_slv_obj)) {
                lu_object_put(env, &qqi->qqi_slv_obj->do_lu);
                qqi->qqi_slv_obj = NULL;
                qqi->qqi_slv_ver = 0;
        }

        /* release global index */
        if (qqi->qqi_glb_obj != NULL && !IS_ERR(qqi->qqi_glb_obj)) {
                lu_object_put(env, &qqi->qqi_glb_obj->do_lu);
                qqi->qqi_glb_obj = NULL;
                qqi->qqi_glb_ver = 0;
        }

        OBD_FREE_PTR(qqi);
        EXIT;
}
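/*
 * Illustrative sketch, not Lustre code: the shutdown pattern in
 * qsd_qtype_fini() above polls until only the owner's own reference
 * remains before tearing the object down, because outstanding users
 * (here, asynchronous lock cancels) drop their references on their own
 * schedule. The name wait_for_last_ref is hypothetical; sleep(1) stands
 * in for the one-second schedule_timeout in the original.
 */
#include <stdatomic.h>
#include <unistd.h>

static void wait_for_last_ref(atomic_int *ref)
{
        /* 1 == only the owner's reference is left */
        while (atomic_load(ref) > 1)
                sleep(1);       /* cfs_time_seconds(1) in the original */
}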