/*
 * The caller should hold one reference on each context in the freelist.
 */
static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
	struct hlist_node	__maybe_unused *pos, *next;
	struct ptlrpc_cli_ctx	*ctx;
	struct gss_cli_ctx	*gctx;

	cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
		hlist_del_init(&ctx->cc_cache);

		/* reverse ctx: propagate the current sequence number to the
		 * buddy svcctx if one exists. Ideally this would be done in
		 * gss_cli_ctx_finalize(), but ctx destruction can be delayed
		 * because:
		 *  1) the ctx may still hold references;
		 *  2) ctx destruction is asynchronous;
		 * and the reverse import's inval_all_ctx() requires this be
		 * done _immediately_, otherwise a newly created reverse ctx
		 * might copy a stale sequence number from the svcctx. */
		gctx = ctx2gctx(ctx);
		if (!rawobj_empty(&gctx->gc_svc_handle) &&
		    sec_is_reverse(gctx->gc_base.cc_sec)) {
			gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
					(__u32) atomic_read(&gctx->gc_seq));
		}

		/* wake up any waiting requests here: the context might be
		 * force-released before the upcall finishes, in which case
		 * the late-arriving downcall cannot find the ctx at all. */
		sptlrpc_cli_ctx_wakeup(ctx);

		unbind_ctx_kr(ctx);
		ctx_put_kr(ctx, 0);
	}
}
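
/*
 * Usage sketch (an assumption, not part of the original source): one way a
 * caller could satisfy the reference contract noted above dispose_ctx_list_kr().
 * example_collect_ctx() is a hypothetical helper; it takes the extra
 * reference that dispose_ctx_list_kr() later drops via ctx_put_kr(ctx, 0).
 */
static void example_collect_ctx(struct ptlrpc_cli_ctx *ctx,
				struct hlist_head *freelist)
{
	/* hold the ref that dispose_ctx_list_kr() will release */
	atomic_inc(&ctx->cc_refcount);

	/* unhash from its current list, then park it on the freelist */
	hlist_del_init(&ctx->cc_cache);
	hlist_add_head(&ctx->cc_cache, freelist);
}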
int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
{
	struct ptlrpc_cli_ctx	*ctx = &gctx->gc_base;
	struct obd_import	*imp = ctx->cc_sec->ps_import;
	struct ptlrpc_request	*req;
	struct ptlrpc_user_desc	*pud;
	int			 rc;
	ENTRY;

	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
		CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
		       "don't send destroy rpc\n", ctx,
		       ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
		RETURN(0);
	}

	might_sleep();

	CWARN("%s ctx %p idx "LPX64" (%u->%s)\n",
	      sec_is_reverse(ctx->cc_sec) ?
	      "server finishing reverse" : "client finishing forward",
	      ctx, gss_handle_to_u64(&gctx->gc_handle),
	      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));

	gctx->gc_proc = PTLRPC_GSS_PROC_DESTROY;

	req = ptlrpc_request_alloc(imp, &RQF_SEC_CTX);
	if (req == NULL) {
		CWARN("ctx %p(%u): failed to prepare rpc, destroy locally\n",
		      ctx, ctx->cc_vcred.vc_uid);
		GOTO(out, rc = -ENOMEM);
	}

	rc = ptlrpc_request_bufs_pack(req, LUSTRE_OBD_VERSION, SEC_CTX_FINI,
				      NULL, ctx);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out_ref, rc);
	}

	/* fix the user desc */
	if (req->rq_pack_udesc) {
		/* we rely on the fact that this request is in AUTH mode,
		 * with the user_desc at offset 2. */
		pud = lustre_msg_buf(req->rq_reqbuf, 2, sizeof(*pud));
		LASSERT(pud);
		pud->pud_uid = pud->pud_fsuid = ctx->cc_vcred.vc_uid;
		pud->pud_gid = pud->pud_fsgid = ctx->cc_vcred.vc_gid;
		pud->pud_cap = 0;
		pud->pud_ngroups = 0;
	}

	req->rq_phase = RQ_PHASE_RPC;
	rc = ptl_send_rpc(req, 1);
	if (rc)
		CWARN("ctx %p(%u->%s): rpc error %d, destroy locally\n", ctx,
		      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec), rc);

out_ref:
	ptlrpc_req_finished(req);
out:
	RETURN(rc);
}
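
/*
 * Usage sketch (an assumption, not part of the original source): a context
 * teardown path would typically attempt the destroy RPC on a best-effort
 * basis, since gss_do_ctx_fini_rpc() already falls back to "destroy
 * locally" on any failure. example_ctx_teardown() is hypothetical; the
 * local cleanup it hints at is not shown here.
 */
static void example_ctx_teardown(struct gss_cli_ctx *gctx)
{
	/* best effort: ask the peer to drop its copy of the context */
	(void) gss_do_ctx_fini_rpc(gctx);

	/* local cleanup (releasing gc_handle, the GSS mech context, etc.)
	 * would follow here */
}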