Example #1
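/*
 * Look up the cached svc_rpc_gss_data entry for the GSS context carried
 * in @gc.  On a hit, the entry is moved to the tail of its partition's
 * LRU queue and a reference is taken for the caller; returns NULL on a
 * miss.
 */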
struct svc_rpc_gss_data *
authgss_ctx_hash_get(struct rpc_gss_cred *gc)
{
	struct svc_rpc_gss_data gk, *gd = NULL;
	gss_union_ctx_id_desc *gss_ctx;
	struct opr_rbtree_node *ngd;
	struct authgss_x_part *axp;
	struct rbtree_x_part *t;

	cond_init_authgss_hash();	/* one-time hash table setup */

	/* key the lookup on a hash of the caller's GSS context */
	gss_ctx = (gss_union_ctx_id_desc *) (gc->gc_ctx.value);
	gk.hk.k = gss_ctx_hash(gss_ctx);

	/* the key selects a tree partition; lock it across the lookup */
	t = rbtx_partition_of_scalar(&authgss_hash_st.xt, gk.hk.k);
	mutex_lock(&t->mtx);
	ngd =
	    rbtree_x_cached_lookup(&authgss_hash_st.xt, t, &gk.node_k, gk.hk.k);
	if (ngd) {
		gd = opr_containerof(ngd, struct svc_rpc_gss_data, node_k);
		/* lru adjust */
		axp = (struct authgss_x_part *)t->u1;
		TAILQ_REMOVE(&axp->lru_q, gd, lru_q);
		TAILQ_INSERT_TAIL(&axp->lru_q, gd, lru_q);
		++(axp->gen);
		/* take a ref for the caller and bump the entry generation */
		(void)atomic_inc_uint32_t(&gd->refcnt);
		(void)atomic_inc_uint32_t(&gd->gen);
	}
	mutex_unlock(&t->mtx);

	return (gd);
}
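A minimal sketch of the intended call pattern: the lookup returns a referenced entry, so every successful get must be paired with a release. The release helper name (unref_svc_rpc_gss_data) and the established flag are assumptions for illustration, not taken from the listing above.

/* Hypothetical caller; unref_svc_rpc_gss_data() and gd->established
 * are assumed names, not shown in the listing above. */
static bool
gss_ctx_cached_and_established(struct rpc_gss_cred *gc)
{
	struct svc_rpc_gss_data *gd = authgss_ctx_hash_get(gc);
	bool ok;

	if (!gd)
		return (false);	/* context not cached */

	ok = gd->established;	/* assumed flag on the entry */
	unref_svc_rpc_gss_data(gd);	/* pair with the ref taken above */
	return (ok);
}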
Example #2
/**
 * @brief Start a duplicate request transaction
 *
 * Finds any matching request entry in the cache, if one exists, else
 * creates one in the START state.  On any non-error return, the refcnt
 * of the corresponding entry is incremented.
 *
 * @param[in] reqnfs  The NFS request data
 * @param[in] req     The request to be cached
 *
 * @retval DUPREQ_SUCCESS if successful.
 * @retval DUPREQ_EXISTS if the request was found with a completed reply.
 * @retval DUPREQ_BEING_PROCESSED if another thread is servicing the request.
 * @retval DUPREQ_ERROR if the cache entry could not be allocated.
 * @retval DUPREQ_INSERT_MALLOC_ERROR if an error occurred during insertion.
 */
dupreq_status_t nfs_dupreq_start(nfs_request_t *reqnfs,
				 struct svc_req *req)
{
	dupreq_status_t status = DUPREQ_SUCCESS;
	dupreq_entry_t *dv, *dk = NULL;
	bool release_dk = true;
	nfs_res_t *res = NULL;
	drc_t *drc;

	/* Disabled? */
	if (nfs_param.core_param.drc.disabled) {
		req->rq_u1 = (void *)DUPREQ_NOCACHE;
		res = alloc_nfs_res();
		goto out;
	}

	/* poison values so a half-initialized request is detectable */
	req->rq_u1 = (void *)DUPREQ_BAD_ADDR1;
	req->rq_u2 = (void *)DUPREQ_BAD_ADDR1;

	drc = nfs_dupreq_get_drc(req);
	if (!drc) {
		status = DUPREQ_INSERT_MALLOC_ERROR;
		goto out;
	}

	switch (drc->type) {
	case DRC_TCP_V4:
		if (reqnfs->funcdesc->service_function == nfs4_Compound) {
			if (!nfs_dupreq_v4_cacheable(reqnfs)) {
				/* for such requests, we merely thread
				 * the request through for later
				 * cleanup--all v41 caching is handled
				 * by the v41 slot reply cache */
				req->rq_u1 = (void *)DUPREQ_NOCACHE;
				res = alloc_nfs_res();
				goto out;
			}
		}
		break;
	default:
		/* likewise for other protocol requests that we cannot, or
		 * choose not to, cache */
		if (!(reqnfs->funcdesc->dispatch_behaviour & CAN_BE_DUP)) {
			req->rq_u1 = (void *)DUPREQ_NOCACHE;
			res = alloc_nfs_res();
			goto out;
		}
		break;
	}

	/* dk is a probe key: on a miss it is inserted and becomes the
	 * cached entry; on a hit it is freed and the cached entry is used */
	dk = alloc_dupreq();
	if (dk == NULL) {
		release_dk = false;	/* nothing allocated to free */
		status = DUPREQ_ERROR;
		goto release_dk;
	}

	dk->hin.drc = drc;	/* transfers the call-path ref to dv */

	switch (drc->type) {
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		dk->hin.tcp.rq_xid = req->rq_xid;
		/* XXX needed? */
		dk->hin.rq_prog = req->rq_prog;
		dk->hin.rq_vers = req->rq_vers;
		dk->hin.rq_proc = req->rq_proc;
		break;
	case DRC_UDP_V234:
		dk->hin.tcp.rq_xid = req->rq_xid;
		if (unlikely(!copy_xprt_addr(&dk->hin.addr, req->rq_xprt))) {
			status = DUPREQ_INSERT_MALLOC_ERROR;
			goto release_dk;
		}
		dk->hin.rq_prog = req->rq_prog;
		dk->hin.rq_vers = req->rq_vers;
		dk->hin.rq_proc = req->rq_proc;
		break;
	default:
		/* error */
		status = DUPREQ_ERROR;
		goto release_dk;
	}

	/* TI-RPC computed checksum */
	dk->hk = req->rq_cksum;

	dk->state = DUPREQ_START;	/* racing duplicates see BEING_PROCESSED */
	dk->timestamp = time(NULL);

	{
		struct opr_rbtree_node *nv;
		struct rbtree_x_part *t =
		    rbtx_partition_of_scalar(&drc->xt, dk->hk);
		PTHREAD_MUTEX_lock(&t->mtx);	/* partition lock */
		nv = rbtree_x_cached_lookup(&drc->xt, t, &dk->rbt_k, dk->hk);
		if (nv) {
			/* cached request */
			dv = opr_containerof(nv, dupreq_entry_t, rbt_k);
			PTHREAD_MUTEX_lock(&dv->mtx);
			if (unlikely(dv->state == DUPREQ_START)) {
				status = DUPREQ_BEING_PROCESSED;
			} else {
				/* satisfy req from the DRC, incref,
				   extend window */
				res = dv->res;
				PTHREAD_MUTEX_lock(&drc->mtx);
				drc_inc_retwnd(drc);
				PTHREAD_MUTEX_unlock(&drc->mtx);
				status = DUPREQ_EXISTS;
				(dv->refcnt)++;
			}
			LogDebug(COMPONENT_DUPREQ,
				 "dupreq hit dv=%p, dv xid=%u cksum %" PRIu64
				 " state=%s", dv, dv->hin.tcp.rq_xid, dv->hk,
				 dupreq_state_table[dv->state]);
			req->rq_u1 = dv;
			PTHREAD_MUTEX_unlock(&dv->mtx);
		} else {
			/* new request */
			res = req->rq_u2 = dk->res = alloc_nfs_res();
			(void)rbtree_x_cached_insert(&drc->xt, t, &dk->rbt_k,
						     dk->hk);
			(dk->refcnt)++;
			/* add to q tail */
			PTHREAD_MUTEX_lock(&drc->mtx);
			TAILQ_INSERT_TAIL(&drc->dupreq_q, dk, fifo_q);
			++(drc->size);
			PTHREAD_MUTEX_unlock(&drc->mtx);
			req->rq_u1 = dk;
			release_dk = false;
			dv = dk;
		}
		PTHREAD_MUTEX_unlock(&t->mtx);
	}

	LogFullDebug(COMPONENT_DUPREQ,
		     "starting dv=%p xid=%u on DRC=%p state=%s, status=%s, refcnt=%d",
		     dv, dk->hin.tcp.rq_xid, drc,
		     dupreq_state_table[dv->state], dupreq_status_table[status],
		     dv->refcnt);

 release_dk:
	if (release_dk)
		nfs_dupreq_free_dupreq(dk);

	nfs_dupreq_put_drc(req->rq_xprt, drc, DRC_FLAG_NONE);	/* dk ref */

 out:
	if (res)
		reqnfs->res_nfs = req->rq_u2 = res;

	return status;
}
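For context, a sketch of how a dispatcher might drive this entry point. nfs_dupreq_finish() and nfs_dupreq_rele() are the companion calls in this module, but their exact signatures, the service_function prototype, and the arg_nfs field are assumed here; this illustrates the intended lifecycle, not the literal dispatch code.

/* Hypothetical dispatch fragment; signatures of service_function(),
 * nfs_dupreq_finish() and nfs_dupreq_rele() are assumed. */
static void
dispatch_one(nfs_request_t *reqnfs, struct svc_req *req)
{
	switch (nfs_dupreq_start(reqnfs, req)) {
	case DUPREQ_SUCCESS:
		/* new request: service it, then publish the reply so
		 * retransmissions can be answered from the cache */
		reqnfs->funcdesc->service_function(&reqnfs->arg_nfs, req,
						   reqnfs->res_nfs);
		(void)nfs_dupreq_finish(req, reqnfs->res_nfs);
		break;
	case DUPREQ_EXISTS:
		/* retransmission: reqnfs->res_nfs already points at the
		 * cached reply; just resend it */
		break;
	case DUPREQ_BEING_PROCESSED:
		/* another thread is servicing the original; drop this one */
		break;
	default:
		/* request was marked NOCACHE or an allocation failed;
		 * service it without caching */
		break;
	}
	nfs_dupreq_rele(req, reqnfs->funcdesc);	/* drop the start() ref */
}

The probe-key pattern above is the notable design choice: the entry is built before the partition lock is taken, so allocation never happens under the lock, at the cost of a throwaway dk on every cache hit.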