int ptlrpc_connection_put(struct ptlrpc_connection *conn)
{
        int rc = 0;
        ENTRY;

        if (!conn)
                RETURN(rc);

        LASSERT(!cfs_hlist_unhashed(&conn->c_hash));

        /*
         * We do not remove the connection from the hashtable and
         * do not free it even if the last caller released its ref,
         * as we want to have it cached for the case it is
         * needed again.
         *
         * Deallocating it and later creating a new connection
         * again would be wasteful. This way we also avoid
         * expensive locking to protect things from a get/put
         * race when a found cached connection is freed by
         * ptlrpc_connection_put().
         *
         * It will be freed later at module unload time, when the
         * ptlrpc_connection_fini()->lh_exit->conn_exit()
         * path is called.
         */
        if (cfs_atomic_dec_return(&conn->c_refcount) == 1)
                rc = 1;

        CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n",
               conn, cfs_atomic_read(&conn->c_refcount),
               libcfs_nid2str(conn->c_peer.nid));

        RETURN(rc);
}
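/*
 * A minimal userspace sketch of the put pattern above, assuming a
 * hypothetical cached object type.  The cache itself holds one
 * permanent reference, so "put" never frees anything: it only reports
 * (by returning 1) that the refcount dropped back to the cache's own
 * ref, i.e. no other users remain.  The names cached_obj and
 * cached_obj_put are illustrative, not part of the Lustre API.
 */
#include <stdatomic.h>
#include <stdio.h>

struct cached_obj {
        atomic_int refcount;            /* >= 1: the cache's own ref */
};

static int cached_obj_put(struct cached_obj *obj)
{
        /*
         * atomic_fetch_sub() returns the value before the decrement;
         * an old value of 2 means only the cache's reference is left,
         * matching the "dec_return == 1" test above.
         */
        return atomic_fetch_sub(&obj->refcount, 1) == 2;
}

int main(void)
{
        struct cached_obj obj = { .refcount = 2 }; /* cache + one user */

        printf("last user gone: %d\n", cached_obj_put(&obj)); /* prints 1 */
        return 0;
}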
/*
 * Unhash @ctx from its sec's context cache.  If the cache reference
 * was the last one, move the ctx onto @freelist so the caller can
 * destroy it after dropping the lock; otherwise just unhash it and
 * leave the remaining ref holders to release it.
 *
 * The caller must hold ctx->cc_sec->ps_lock.
 */
static void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx,
                          cfs_hlist_head_t *freelist)
{
        assert_spin_locked(&ctx->cc_sec->ps_lock);
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
        LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));

        clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);

        if (atomic_dec_and_test(&ctx->cc_refcount)) {
                /* cache ref was the last; defer freeing to the caller */
                __cfs_hlist_del(&ctx->cc_cache);
                cfs_hlist_add_head(&ctx->cc_cache, freelist);
        } else {
                cfs_hlist_del_init(&ctx->cc_cache);
        }
}
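/*
 * A minimal sketch of how a caller is expected to use a helper like
 * ctx_unhash_pf(): collect contexts onto a private freelist while
 * holding the lock, then destroy them after the lock is dropped, so
 * destruction never runs under the spinlock.  The list layout and the
 * names my_ctx and flush_all are hypothetical stand-ins, not the
 * Lustre types; a pthread mutex stands in for ps_lock.
 */
#include <pthread.h>
#include <stdlib.h>

struct my_ctx {
        struct my_ctx *next;
        int refcount;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct my_ctx *cache;            /* guarded by cache_lock */

static void flush_all(void)
{
        struct my_ctx *freelist = NULL;
        struct my_ctx *ctx;

        pthread_mutex_lock(&cache_lock);
        /* Unhash everything; defer freeing until the lock is dropped. */
        while ((ctx = cache) != NULL) {
                cache = ctx->next;
                if (--ctx->refcount == 0) {
                        ctx->next = freelist; /* cache ref was the last */
                        freelist = ctx;
                }
        }
        pthread_mutex_unlock(&cache_lock);

        while ((ctx = freelist) != NULL) {  /* destroy outside the lock */
                freelist = ctx->next;
                free(ctx);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct my_ctx *ctx = calloc(1, sizeof(*ctx));

                if (!ctx)
                        break;
                ctx->refcount = 1;      /* the cache's reference */
                ctx->next = cache;
                cache = ctx;
        }
        flush_all();
        return 0;
}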
/*
 * Expire @ctx immediately and drop it from its sec's context cache.
 */
static void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
{
        LASSERT(ctx->cc_sec);
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);

        cli_ctx_expire(ctx);

        spin_lock(&ctx->cc_sec->ps_lock);

        if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
                LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
                LASSERT(atomic_read(&ctx->cc_refcount) > 1);

                cfs_hlist_del_init(&ctx->cc_cache);

                /*
                 * The cache's reference can never be the last one here:
                 * the caller must hold its own reference on @ctx, so
                 * hitting zero while unhashing means a refcount bug.
                 */
                if (atomic_dec_and_test(&ctx->cc_refcount))
                        LBUG();
        }

        spin_unlock(&ctx->cc_sec->ps_lock);
}
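/*
 * A minimal sketch of the invariant the LBUG() above enforces, assuming
 * a hypothetical context type: while the CACHED flag is set the cache
 * owns one reference, and the caller of "die" owns another, so dropping
 * the cache's ref inside die() can never be the final put.  The names
 * toy_ctx and toy_ctx_die are illustrative only.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct toy_ctx {
        atomic_int refcount;
        atomic_bool cached;     /* stands in for PTLRPC_CTX_CACHED_BIT */
        bool expired;
};

static void toy_ctx_die(struct toy_ctx *ctx)
{
        ctx->expired = true;    /* mirrors cli_ctx_expire() */

        /* Atomically test-and-clear the cached flag. */
        if (atomic_exchange(&ctx->cached, false)) {
                /* caller's ref + cache's ref => count must stay > 0 */
                assert(atomic_fetch_sub(&ctx->refcount, 1) > 1);
        }
}

int main(void)
{
        struct toy_ctx ctx = { .refcount = 2, .cached = true };

        toy_ctx_die(&ctx);      /* drops cache ref; caller's ref remains */
        return 0;
}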