static void null_init_internal(void)
{
        static CFS_HLIST_HEAD(__list);

        null_sec.ps_policy = &null_policy;
        cfs_atomic_set(&null_sec.ps_refcount, 1);        /* always busy */
        null_sec.ps_id = -1;
        null_sec.ps_import = NULL;
        null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
        null_sec.ps_flvr.sf_flags = 0;
        null_sec.ps_part = LUSTRE_SP_ANY;
        null_sec.ps_dying = 0;
        cfs_spin_lock_init(&null_sec.ps_lock);
        cfs_atomic_set(&null_sec.ps_nctx, 1);            /* for "null_cli_ctx" */
        CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
        null_sec.ps_gc_interval = 0;
        null_sec.ps_gc_next = 0;

        cfs_hlist_add_head(&null_cli_ctx.cc_cache, &__list);
        cfs_atomic_set(&null_cli_ctx.cc_refcount, 1);    /* for hash */
        null_cli_ctx.cc_sec = &null_sec;
        null_cli_ctx.cc_ops = &null_ctx_ops;
        null_cli_ctx.cc_expire = 0;
        null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
                                PTLRPC_CTX_UPTODATE;
        null_cli_ctx.cc_vcred.vc_uid = 0;
        cfs_spin_lock_init(&null_cli_ctx.cc_lock);
        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
}
/*
 * mark @ctx as cached, take a reference for the cache and link it onto @hash
 */
static void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
{
        set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
        atomic_inc(&ctx->cc_refcount);
        cfs_hlist_add_head(&ctx->cc_cache, hash);
}
/*
 * caller must hold ctx->cc_sec->ps_lock
 */
static void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
{
        assert_spin_locked(&ctx->cc_sec->ps_lock);
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
        LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));

        clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);

        if (atomic_dec_and_test(&ctx->cc_refcount)) {
                __cfs_hlist_del(&ctx->cc_cache);
                cfs_hlist_add_head(&ctx->cc_cache, freelist);
        } else {
                cfs_hlist_del_init(&ctx->cc_cache);
        }
}
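/*
 * Illustrative sketch only -- not code from this file.  It shows the caller
 * contract implied by ctx_unhash_pf(): take ps_lock, unhash contexts onto a
 * local freelist, and only touch the freelist entries after the lock is
 * dropped.  The function name and the iteration over @hash are assumptions
 * for illustration; the real flush path adds grace/force handling.
 */
static void ctx_cache_flush_sketch(struct ptlrpc_sec *sec,
                                   cfs_hlist_head_t *hash)
{
        CFS_HLIST_HEAD(freelist);
        struct ptlrpc_cli_ctx *ctx;
        cfs_hlist_node_t *pos, *next;

        cfs_spin_lock(&sec->ps_lock);
        cfs_hlist_for_each_entry_safe(ctx, pos, next, hash, cc_cache)
                ctx_unhash_pf(ctx, &freelist);
        cfs_spin_unlock(&sec->ps_lock);

        /* contexts on @freelist held only the cache reference, which has
         * now dropped to zero; a real caller would destroy them here
         * (destruction details omitted in this sketch) */
}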