/*
 * caller should hold one ref on contexts in freelist.
 */
static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
	struct hlist_node __maybe_unused *pos, *next;
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx *gctx;

	cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
		hlist_del_init(&ctx->cc_cache);

		/* reverse ctx: update current seq to buddy svcctx if one
		 * exists. ideally this should be done at
		 * gss_cli_ctx_finalize(), but the ctx destroy could be
		 * delayed by:
		 * 1) ctx still has reference;
		 * 2) ctx destroy is asynchronous;
		 * and the reverse import calls inval_all_ctx(), which
		 * requires this be done _immediately_, otherwise a newly
		 * created reverse ctx might copy the very old sequence
		 * number from svcctx. */
		gctx = ctx2gctx(ctx);
		if (!rawobj_empty(&gctx->gc_svc_handle) &&
		    sec_is_reverse(gctx->gc_base.cc_sec)) {
			gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
					(__u32) atomic_read(&gctx->gc_seq));
		}

		/* we need to wake up waiting reqs here. the context might
		 * be forced released before the upcall finished, and then
		 * the late-arrived downcall can't even find the ctx. */
		sptlrpc_cli_ctx_wakeup(ctx);

		unbind_ctx_kr(ctx);
		ctx_put_kr(ctx, 0);
	}
}
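/*
 * A minimal caller-side sketch (an illustration, not from the original
 * source) of the contract stated in the header comment above: the caller
 * takes one reference per context, links it onto a local freelist through
 * cc_cache, then hands the whole list to dispose_ctx_list_kr(), which
 * drops that reference via ctx_put_kr(). The helper name below is
 * hypothetical.
 */
static void dispose_one_ctx_example(struct ptlrpc_cli_ctx *ctx)
{
	HLIST_HEAD(freelist);

	atomic_inc(&ctx->cc_refcount);		/* ref owned by the freelist */
	hlist_add_head(&ctx->cc_cache, &freelist);

	dispose_ctx_list_kr(&freelist);		/* unbinds and drops the ref */
}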
static void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
{
	struct gss_cli_ctx *gctx = ctx2gctx(ctx);

	/* common fini handles the gss internals; a non-zero return means
	 * the ctx must not be freed here. */
	if (gss_cli_ctx_fini_common(sec, ctx))
		return;

	OBD_FREE_PTR(gctx);

	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}
int gss_cli_prep_bulk(struct ptlrpc_request *req,
		      struct ptlrpc_bulk_desc *desc)
{
	int rc;
	ENTRY;

	LASSERT(req->rq_cli_ctx);
	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_read);

	if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
		RETURN(0);

	rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
	if (rc)
		CERROR("bulk read: failed to prepare encryption pages: %d\n",
		       rc);

	RETURN(rc);
}
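/*
 * Hypothetical call-site sketch (not the original caller): the LASSERTs
 * above imply gss_cli_prep_bulk() is only invoked for a bulk-read request
 * whose flavor packs bulk security; anything else can skip the call, and
 * non-SPTLRPC_BULK_SVC_PRIV flavors return 0 without touching the
 * descriptor.
 */
static int prep_bulk_if_needed_example(struct ptlrpc_request *req,
				       struct ptlrpc_bulk_desc *desc)
{
	/* guard mirroring the assertions in gss_cli_prep_bulk() */
	if (!req->rq_pack_bulk || !req->rq_bulk_read)
		return 0;

	return gss_cli_prep_bulk(req, desc);
}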