Code Example #1
File: authgss_hash.c  Project: hkoehler/ntirpc
bool
authgss_ctx_hash_set(struct svc_rpc_gss_data *gd)
{
	struct rbtree_x_part *t;
	struct authgss_x_part *axp;
	gss_union_ctx_id_desc *gss_ctx;
	bool rslt;

	cond_init_authgss_hash();

	gss_ctx = (gss_union_ctx_id_desc *) (gd->ctx);
	gd->hk.k = gss_ctx_hash(gss_ctx);

	++(gd->refcnt);		/* locked */
	t = rbtx_partition_of_scalar(&authgss_hash_st.xt, gd->hk.k);
	mutex_lock(&t->mtx);
	rslt =
	    rbtree_x_cached_insert(&authgss_hash_st.xt, t, &gd->node_k,
				   gd->hk.k);
	/* lru */
	axp = (struct authgss_x_part *)t->u1;
	TAILQ_INSERT_TAIL(&axp->lru_q, gd, lru_q);
	mutex_unlock(&t->mtx);

	/* global size */
	(void)atomic_inc_uint32_t(&authgss_hash_st.size);

	return (rslt);
}
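The function above shows the ntirpc pattern for adding an entry to a partitioned, LRU-tracked hash table: derive a scalar key from the GSS context, select the rbtree partition that owns the key, then perform both the tree insert and the LRU append under that partition's single mutex. A minimal self-contained sketch of the same layout, with hypothetical types and names (not the rbtree_x API):

#include <pthread.h>
#include <stdint.h>
#include <sys/queue.h>

#define NPART 16	/* hypothetical partition count */

struct entry {
	uint64_t key;			/* hash key, as gd->hk.k above */
	TAILQ_ENTRY(entry) lru;		/* per-partition LRU linkage */
	/* tree linkage (e.g., an rbtree node) would also live here */
};

struct partition {
	pthread_mutex_t mtx;		/* guards tree and LRU together */
	TAILQ_HEAD(, entry) lru_q;	/* oldest at head, newest at tail */
};

/* assume parts[] is initialized (mutexes, TAILQ_INIT) at startup */
static struct partition parts[NPART];

/* select the partition owning a scalar key, in the spirit of
 * rbtx_partition_of_scalar() */
static struct partition *partition_of(uint64_t k)
{
	return &parts[k % NPART];
}

static void table_insert(struct entry *e)
{
	struct partition *p = partition_of(e->key);

	pthread_mutex_lock(&p->mtx);
	/* the tree insert would go here, under the same lock */
	TAILQ_INSERT_TAIL(&p->lru_q, e, lru);
	pthread_mutex_unlock(&p->mtx);
}

Keeping the LRU queue under the same mutex as the tree is what lets the lookup path (code example #3 below) adjust recency without any extra locking.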
Code Example #2
/**
 * @brief Check for expired TCP DRCs.
 */
static inline void drc_free_expired(void)
{
	drc_t *drc;
	time_t now = time(NULL);
	struct rbtree_x_part *t;
	struct opr_rbtree_node *odrc = NULL;

	DRC_ST_LOCK();

	if ((drc_st->tcp_drc_recycle_qlen < 1) ||
	    (now - drc_st->last_expire_check) < 600) /* 10m */
		goto unlock;

	do {
		drc = TAILQ_FIRST(&drc_st->tcp_drc_recycle_q);
		if (drc && (drc->d_u.tcp.recycle_time > 0)
		    && ((now - drc->d_u.tcp.recycle_time) >
			drc_st->expire_delta) && (drc->refcnt == 0)) {
			LogFullDebug(COMPONENT_DUPREQ,
				     "remove expired drc %p from recycle queue",
				     drc);
			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc->d_u.tcp.hk);

			odrc =
			    opr_rbtree_lookup(&t->t, &drc->d_u.tcp.recycle_k);
			if (!odrc) {
				LogCrit(COMPONENT_DUPREQ,
					"BUG: asked to dequeue DRC not on queue");
			} else {
				(void)opr_rbtree_remove(&t->t,
							&drc->d_u.tcp.
							recycle_k);
			}
			TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q, drc,
				     d_u.tcp.recycle_q);
			--(drc_st->tcp_drc_recycle_qlen);
			/* expect DRC to be reachable from some xprt(s) */
			PTHREAD_MUTEX_lock(&drc->mtx);
			drc->flags &= ~DRC_FLAG_RECYCLE;
			/* but if not, dispose it */
			if (drc->refcnt == 0) {
				PTHREAD_MUTEX_unlock(&drc->mtx);
				free_tcp_drc(drc);
				continue;
			}
			PTHREAD_MUTEX_unlock(&drc->mtx);
		} else {
			LogFullDebug(COMPONENT_DUPREQ,
				     "unexpired drc %p in recycle queue expire check (nothing happens)",
				     drc);
			drc_st->last_expire_check = now;
			break;
		}

	} while (1);

 unlock:
	DRC_ST_UNLOCK();
}
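The long conditional above bundles three independent tests; spelled out as a predicate, with field names mirroring the fragment (the surrounding structs are not shown in this listing):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* A DRC on the recycle queue is expendable only if it was actually
 * recycled (recycle_time set), has sat unused longer than the grace
 * period, and no one still holds a reference. */
static bool drc_is_expired(time_t now, time_t recycle_time,
			   time_t expire_delta, uint32_t refcnt)
{
	return (recycle_time > 0)
	    && ((now - recycle_time) > expire_delta)
	    && (refcnt == 0);
}

Because DRCs are appended to the recycle queue when they are parked, the queue is ordered by recycle time; the loop can therefore stop at the first unexpired entry, which is why the else branch records last_expire_check and breaks.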
Code Example #3
File: authgss_hash.c  Project: hkoehler/ntirpc
struct svc_rpc_gss_data *
authgss_ctx_hash_get(struct rpc_gss_cred *gc)
{
	struct svc_rpc_gss_data gk, *gd = NULL;
	gss_union_ctx_id_desc *gss_ctx;
	struct opr_rbtree_node *ngd;
	struct authgss_x_part *axp;
	struct rbtree_x_part *t;

	cond_init_authgss_hash();

	gss_ctx = (gss_union_ctx_id_desc *) (gc->gc_ctx.value);
	gk.hk.k = gss_ctx_hash(gss_ctx);

	t = rbtx_partition_of_scalar(&authgss_hash_st.xt, gk.hk.k);
	mutex_lock(&t->mtx);
	ngd =
	    rbtree_x_cached_lookup(&authgss_hash_st.xt, t, &gk.node_k, gk.hk.k);
	if (ngd) {
		gd = opr_containerof(ngd, struct svc_rpc_gss_data, node_k);
		/* lru adjust */
		axp = (struct authgss_x_part *)t->u1;
		TAILQ_REMOVE(&axp->lru_q, gd, lru_q);
		TAILQ_INSERT_TAIL(&axp->lru_q, gd, lru_q);
		++(axp->gen);
		(void)atomic_inc_uint32_t(&gd->refcnt);
		(void)atomic_inc_uint32_t(&gd->gen);
	}
	mutex_unlock(&t->mtx);

	return (gd);
}
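The TAILQ_REMOVE/TAILQ_INSERT_TAIL pair on a hit is the standard intrusive-list way to mark an entry most-recently-used; the head of the queue is then always the coldest eviction candidate. In isolation (hypothetical node type; the caller holds the lock that protects the queue, t->mtx above):

#include <sys/queue.h>

struct node {
	TAILQ_ENTRY(node) lru;
};
TAILQ_HEAD(lru_head, node);

/* Move a just-hit node to the MRU (tail) end. O(1): TAILQ links
 * are intrusive, so no search is needed. */
static void lru_touch(struct lru_head *q, struct node *n)
{
	TAILQ_REMOVE(q, n, lru);
	TAILQ_INSERT_TAIL(q, n, lru);
}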
Code Example #4
File: rpc_dplx.c  Project: vganapathi/ntirpc
struct rpc_dplx_rec *
rpc_dplx_lookup_rec(int fd, uint32_t iflags, uint32_t *oflags)
{
    struct rbtree_x_part *t;
    struct rpc_dplx_rec rk, *rec = NULL;
    struct opr_rbtree_node *nv;

    cond_init_rpc_dplx();

    rk.fd_k = fd;
    t = rbtx_partition_of_scalar(&(rpc_dplx_rec_set.xt), fd);

    rwlock_rdlock(&t->lock);
    nv = opr_rbtree_lookup(&t->t, &rk.node_k);

    /* XXX rework lock+insert case, so that new entries are inserted
     * locked, and t->lock critical section is reduced */

    if (! nv) {
        rwlock_unlock(&t->lock);
        rwlock_wrlock(&t->lock);
        nv = opr_rbtree_lookup(&t->t, &rk.node_k);
        if (! nv) {
            rec = alloc_dplx_rec();
            if (! rec) {
                __warnx(TIRPC_DEBUG_FLAG_LOCK,
                        "%s: failed allocating rpc_dplx_rec", __func__);
                goto unlock;
            }

            /* tell the caller */
            *oflags = RPC_DPLX_LKP_OFLAG_ALLOC;

            rec->fd_k = fd;

            if (opr_rbtree_insert(&t->t, &rec->node_k)) {
                /* can't happen: the partition is write-locked */
                __warnx(TIRPC_DEBUG_FLAG_LOCK,
                        "%s: collision inserting in locked rbtree partition",
                        __func__);
                free_dplx_rec(rec);
                rec = NULL;
                goto unlock;
            }
        }
        else {
            /* lost the race: another thread inserted the record
             * while t->lock was dropped */
            rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
            *oflags = RPC_DPLX_LKP_FLAG_NONE;
        }
    }
    else {
        rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
        *oflags = RPC_DPLX_LKP_FLAG_NONE;
    }

    rpc_dplx_ref(rec, (iflags & RPC_DPLX_LKP_IFLAG_LOCKREC) ?
                 RPC_DPLX_FLAG_LOCK :
                 RPC_DPLX_FLAG_NONE);

unlock:
    rwlock_unlock(&t->lock);

    return (rec);
}
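The miss path above is the classic rwlock upgrade idiom the XXX comment wants to rework: POSIX rwlocks cannot be upgraded in place, so the reader lock is dropped, the writer lock taken, and the lookup repeated, because another thread may have inserted the record in the unlocked window. Distilled, with hypothetical find/insert/make helpers:

#include <pthread.h>
#include <stddef.h>

struct obj;				/* the cached record type */
struct obj *find(long key);		/* assumed: NULL on miss */
void insert(struct obj *o);		/* assumed: caller holds write lock */
struct obj *make(long key);		/* assumed: allocate a new record */

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

struct obj *lookup_or_create(long key)
{
	struct obj *o;

	pthread_rwlock_rdlock(&lock);
	o = find(key);
	if (!o) {
		/* no upgrade primitive: drop the read lock, take the
		 * write lock, and re-check for a racing insert */
		pthread_rwlock_unlock(&lock);
		pthread_rwlock_wrlock(&lock);
		o = find(key);
		if (!o) {
			o = make(key);
			if (o)
				insert(o);
		}
	}
	pthread_rwlock_unlock(&lock);
	return o;
}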
Code Example #5
/**
 *
 * @brief Remove an entry (request) from a duplicate request cache.
 *
 * The expected pattern is that nfs_rpc_execute shall delete requests only
 * in error conditions.  The refcnt of the corresponding duplicate request
 * entry is unchanged (i.e., the caller must still call nfs_dupreq_rele).
 *
 * We assert req->rq_u1 now points to the corresponding duplicate request
 * cache entry.
 *
 * @param[in] req The svc_req structure.
 *
 * @return DUPREQ_SUCCESS if successful.
 *
 */
dupreq_status_t nfs_dupreq_delete(struct svc_req *req)
{
	dupreq_entry_t *dv = (dupreq_entry_t *)req->rq_u1;
	dupreq_status_t status = DUPREQ_SUCCESS;
	struct rbtree_x_part *t;
	drc_t *drc;

	/* do nothing if req is marked no-cache */
	if (dv == (void *)DUPREQ_NOCACHE)
		goto out;

	/* do nothing if nfs_dupreq_start failed completely */
	if (dv == (void *)DUPREQ_BAD_ADDR1)
		goto out;

	PTHREAD_MUTEX_lock(&dv->mtx);
	drc = dv->hin.drc;
	dv->state = DUPREQ_DELETED;
	PTHREAD_MUTEX_unlock(&dv->mtx);

	LogFullDebug(COMPONENT_DUPREQ,
		     "deleting dv=%p xid=%u on DRC=%p state=%s, status=%s, refcnt=%d",
		     dv, dv->hin.tcp.rq_xid, drc,
		     dupreq_state_table[dv->state], dupreq_status_table[status],
		     dv->refcnt);

	/* XXX dv holds a ref on drc */
	t = rbtx_partition_of_scalar(&drc->xt, dv->hk);

	PTHREAD_MUTEX_lock(&t->mtx);
	rbtree_x_cached_remove(&drc->xt, t, &dv->rbt_k, dv->hk);

	PTHREAD_MUTEX_unlock(&t->mtx);
	PTHREAD_MUTEX_lock(&drc->mtx);

	if (TAILQ_IS_ENQUEUED(dv, fifo_q))
		TAILQ_REMOVE(&drc->dupreq_q, dv, fifo_q);
	--(drc->size);

	/* release dv's ref and unlock */
	nfs_dupreq_put_drc(req->rq_xprt, drc, DRC_FLAG_LOCKED);
	/* !LOCKED */

 out:
	return status;
}
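Note the locking shape of the delete: the tree partition lock (t->mtx) is released before the container lock (drc->mtx) is taken, so the two are never held at once. The same two-lock discipline in generic form (hypothetical types, with an assumed index_remove helper):

#include <pthread.h>
#include <sys/queue.h>

struct item {
	TAILQ_ENTRY(item) fifo;
	/* index (tree) linkage elided */
};

struct cache {
	pthread_mutex_t idx_mtx;	/* protects the index (tree) */
	pthread_mutex_t q_mtx;		/* protects fifo_q and size */
	TAILQ_HEAD(, item) fifo_q;
	unsigned size;
};

/* assumed helper: unlink i from the index; caller holds idx_mtx */
void index_remove(struct cache *c, struct item *i);

static void cache_delete(struct cache *c, struct item *i)
{
	/* never hold both locks: index first ... */
	pthread_mutex_lock(&c->idx_mtx);
	index_remove(c, i);
	pthread_mutex_unlock(&c->idx_mtx);

	/* ... then the queue */
	pthread_mutex_lock(&c->q_mtx);
	TAILQ_REMOVE(&c->fifo_q, i, fifo);
	--(c->size);
	pthread_mutex_unlock(&c->q_mtx);
}

Never nesting the two locks rules out deadlock between the delete path and paths that traverse the queue while touching the index.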
Code Example #6
File: rpc_dplx.c  Project: vganapathi/ntirpc
int32_t
rpc_dplx_unref(struct rpc_dplx_rec *rec, u_int flags)
{
    struct rbtree_x_part *t;
    struct opr_rbtree_node *nv;
    int32_t refcnt;

    if (! (flags & RPC_DPLX_FLAG_LOCKED))
        REC_LOCK(rec);

    refcnt = --(rec->refcnt);

    __warnx(TIRPC_DEBUG_FLAG_REFCNT,
            "%s: postunref %p rec->refcnt %u",
            __func__, rec, refcnt);

    if (rec->refcnt == 0) {
        t = rbtx_partition_of_scalar(&rpc_dplx_rec_set.xt, rec->fd_k);
        REC_UNLOCK(rec);
        rwlock_wrlock(&t->lock);
        nv = opr_rbtree_lookup(&t->t, &rec->node_k);
        rec = NULL;
        if (nv) {
            rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
            REC_LOCK(rec);
            if (rec->refcnt == 0) {
                (void) opr_rbtree_remove(&t->t, &rec->node_k);
                REC_UNLOCK(rec);
                __warnx(TIRPC_DEBUG_FLAG_REFCNT,
                        "%s: free rec %p rec->refcnt %u",
                        __func__, rec, refcnt);

                free_dplx_rec(rec);
                rec = NULL;
            } else {
                refcnt = rec->refcnt;
            }
        }
        rwlock_unlock(&t->lock);
    }

    /* tail restored (the listing breaks off above); assumed behavior:
     * if the record survived, release its lock, then report the
     * refcount computed above */
    if (rec)
        REC_UNLOCK(rec);

    return (refcnt);
}
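The drop-to-zero path deserves a second look: the record lock must be released before the table partition's write lock is taken (to preserve lock order), which opens a window where another thread can revive or free the record. Hence the re-lookup and the second refcnt check under the write lock before freeing. A generic sketch of that discipline (hypothetical table helpers; a single table lock for brevity):

#include <pthread.h>
#include <stdlib.h>

struct ref_obj {
	pthread_mutex_t mtx;
	int refcnt;
	long key;
};

/* assumed helpers, both called with table_lock held */
extern pthread_mutex_t table_lock;
struct ref_obj *table_find(long key);
void table_remove(struct ref_obj *o);

/* Drop one reference; free only if, with the table locked, the
 * object is still present and still unreferenced. */
int unref(struct ref_obj *o)
{
	long key = o->key;
	int refcnt;

	pthread_mutex_lock(&o->mtx);
	refcnt = --(o->refcnt);
	pthread_mutex_unlock(&o->mtx);
	if (refcnt > 0)
		return refcnt;

	/* lock order is table, then object, so the object lock was
	 * dropped above; o may be revived or freed in this window */
	pthread_mutex_lock(&table_lock);
	o = table_find(key);
	if (o) {
		pthread_mutex_lock(&o->mtx);
		if (o->refcnt == 0) {		/* nobody revived it */
			table_remove(o);
			pthread_mutex_unlock(&o->mtx);
			free(o);
		} else {
			refcnt = o->refcnt;	/* revived; report that */
			pthread_mutex_unlock(&o->mtx);
		}
	}
	pthread_mutex_unlock(&table_lock);
	return refcnt;
}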
Code Example #7
File: authgss_hash.c  Project: hkoehler/ntirpc
bool
authgss_ctx_hash_del(struct svc_rpc_gss_data *gd)
{
	struct rbtree_x_part *t;
	struct authgss_x_part *axp;

	cond_init_authgss_hash();

	t = rbtx_partition_of_scalar(&authgss_hash_st.xt, gd->hk.k);
	mutex_lock(&t->mtx);
	rbtree_x_cached_remove(&authgss_hash_st.xt, t, &gd->node_k, gd->hk.k);
	axp = (struct authgss_x_part *)t->u1;
	TAILQ_REMOVE(&axp->lru_q, gd, lru_q);
	mutex_unlock(&t->mtx);

	/* global size */
	(void)atomic_dec_uint32_t(&authgss_hash_st.size);

	/* release gd */
	unref_svc_rpc_gss_data(gd, SVC_RPC_GSS_FLAG_NONE);

	return (true);
}
Code Example #8
/**
 * @brief Start a duplicate request transaction
 *
 * Finds any matching request entry in the cache, if one exists, else
 * creates one in the START state.  On any non-error return, the refcnt
 * of the corresponding entry is incremented.
 *
 * @param[in] reqnfs  The NFS request data
 * @param[in] req     The request to be cached
 *
 * @retval DUPREQ_SUCCESS if successful.
 * @retval DUPREQ_INSERT_MALLOC_ERROR if an error occurred during insertion.
 */
dupreq_status_t nfs_dupreq_start(nfs_request_t *reqnfs,
				 struct svc_req *req)
{
	dupreq_status_t status = DUPREQ_SUCCESS;
	dupreq_entry_t *dv, *dk = NULL;
	bool release_dk = true;
	nfs_res_t *res = NULL;
	drc_t *drc;

	/* Disabled? */
	if (nfs_param.core_param.drc.disabled) {
		req->rq_u1 = (void *)DUPREQ_NOCACHE;
		res = alloc_nfs_res();
		goto out;
	}

	req->rq_u1 = (void *)DUPREQ_BAD_ADDR1;
	req->rq_u2 = (void *)DUPREQ_BAD_ADDR1;

	drc = nfs_dupreq_get_drc(req);
	if (!drc) {
		status = DUPREQ_INSERT_MALLOC_ERROR;
		goto out;
	}

	switch (drc->type) {
	case DRC_TCP_V4:
		if (reqnfs->funcdesc->service_function == nfs4_Compound) {
			if (!nfs_dupreq_v4_cacheable(reqnfs)) {
				/* for such requests, we merely thread
				 * the request through for later
				 * cleanup--all v41 caching is handled
				 * by the v41 slot reply cache */
				req->rq_u1 = (void *)DUPREQ_NOCACHE;
				res = alloc_nfs_res();
				goto out;
			}
		}
		break;
	default:
		/* likewise for other protocol requests we may not, or
		 * choose not to, cache */
		if (!(reqnfs->funcdesc->dispatch_behaviour & CAN_BE_DUP)) {
			req->rq_u1 = (void *)DUPREQ_NOCACHE;
			res = alloc_nfs_res();
			goto out;
		}
		break;
	}

	dk = alloc_dupreq();
	if (dk == NULL) {
		release_dk = false;
		status = DUPREQ_ERROR;
		goto release_dk;
	}

	dk->hin.drc = drc;	/* trans. call path ref to dv */

	switch (drc->type) {
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		dk->hin.tcp.rq_xid = req->rq_xid;
		/* XXX needed? */
		dk->hin.rq_prog = req->rq_prog;
		dk->hin.rq_vers = req->rq_vers;
		dk->hin.rq_proc = req->rq_proc;
		break;
	case DRC_UDP_V234:
		dk->hin.tcp.rq_xid = req->rq_xid;
		if (unlikely(!copy_xprt_addr(&dk->hin.addr, req->rq_xprt))) {
			status = DUPREQ_INSERT_MALLOC_ERROR;
			goto release_dk;
		}
		dk->hin.rq_prog = req->rq_prog;
		dk->hin.rq_vers = req->rq_vers;
		dk->hin.rq_proc = req->rq_proc;
		break;
	default:
		/* error */
		status = DUPREQ_ERROR;
		goto release_dk;
	}

	/* TI-RPC computed checksum */
	dk->hk = req->rq_cksum;

	dk->state = DUPREQ_START;
	dk->timestamp = time(NULL);

	{
		struct opr_rbtree_node *nv;
		struct rbtree_x_part *t =
		    rbtx_partition_of_scalar(&drc->xt, dk->hk);
		PTHREAD_MUTEX_lock(&t->mtx);	/* partition lock */
		nv = rbtree_x_cached_lookup(&drc->xt, t, &dk->rbt_k, dk->hk);
		if (nv) {
			/* cached request */
			dv = opr_containerof(nv, dupreq_entry_t, rbt_k);
			PTHREAD_MUTEX_lock(&dv->mtx);
			if (unlikely(dv->state == DUPREQ_START)) {
				status = DUPREQ_BEING_PROCESSED;
			} else {
				/* satisfy req from the DRC, incref,
				   extend window */
				res = dv->res;
				PTHREAD_MUTEX_lock(&drc->mtx);
				drc_inc_retwnd(drc);
				PTHREAD_MUTEX_unlock(&drc->mtx);
				status = DUPREQ_EXISTS;
				(dv->refcnt)++;
			}
			LogDebug(COMPONENT_DUPREQ,
				 "dupreq hit dv=%p, dv xid=%u cksum %" PRIu64
				 " state=%s", dv, dv->hin.tcp.rq_xid, dv->hk,
				 dupreq_state_table[dv->state]);
			req->rq_u1 = dv;
			PTHREAD_MUTEX_unlock(&dv->mtx);
		} else {
			/* new request */
			res = req->rq_u2 = dk->res = alloc_nfs_res();
			(void)rbtree_x_cached_insert(&drc->xt, t, &dk->rbt_k,
						     dk->hk);
			(dk->refcnt)++;
			/* add to q tail */
			PTHREAD_MUTEX_lock(&drc->mtx);
			TAILQ_INSERT_TAIL(&drc->dupreq_q, dk, fifo_q);
			++(drc->size);
			PTHREAD_MUTEX_unlock(&drc->mtx);
			req->rq_u1 = dk;
			release_dk = false;
			dv = dk;
		}
		PTHREAD_MUTEX_unlock(&t->mtx);
	}

	LogFullDebug(COMPONENT_DUPREQ,
		     "starting dv=%p xid=%u on DRC=%p state=%s, status=%s, refcnt=%d",
		     dv, dk->hin.tcp.rq_xid, drc,
		     dupreq_state_table[dv->state], dupreq_status_table[status],
		     dv->refcnt);

 release_dk:
	if (release_dk)
		nfs_dupreq_free_dupreq(dk);

	nfs_dupreq_put_drc(req->rq_xprt, drc, DRC_FLAG_NONE);	/* dk ref */

 out:
	if (res)
		reqnfs->res_nfs = req->rq_u2 = res;

	return status;
}
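The heart of the function is the block that holds the partition mutex across both the cached lookup and the insert: unlike the rwlock upgrade in code example #4, there is no window in which two threads can each miss and insert the same checksum key. Reduced to its shape (hypothetical tree helpers):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct entry;					/* cached request type */
struct entry *tree_lookup(uint64_t hk);		/* assumed */
void tree_insert(struct entry *e, uint64_t hk);	/* assumed */

static pthread_mutex_t part_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Atomic lookup-or-insert: returns the cached entry for hk if one
 * exists, else inserts the caller's candidate dk and returns it.
 * *existed tells the caller which case occurred (and hence whether
 * the candidate must be released, cf. release_dk above). */
struct entry *lookup_or_insert(struct entry *dk, uint64_t hk,
			       bool *existed)
{
	struct entry *dv;

	pthread_mutex_lock(&part_mtx);
	dv = tree_lookup(hk);
	if (dv) {
		*existed = true;	/* duplicate request */
	} else {
		tree_insert(dk, hk);	/* dk becomes the cached entry */
		dv = dk;
		*existed = false;
	}
	pthread_mutex_unlock(&part_mtx);
	return dv;
}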
Code Example #9
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * @param[in] req  The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		/* Idempotent address, no need for lock;
		 * xprt will be valid as long as svc_req.
		 */
		drc = (drc_t *)req->rq_xprt->xp_u2;
		if (drc) {
			/* found, no danger of removal */
			LogFullDebug(COMPONENT_DUPREQ, "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));
			drc_k.type = dtype;

			/* Since the drc can last longer than the xprt,
			 * copy the address. Read operation of constant data,
			 * no xprt lock required.
			 */
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[SOCK_NAME_MAX];

				sprint_sockaddr(&drc_k.d_u.tcp.addr,
						str, sizeof(str));
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc =
			    opr_rbtree_lookup(&t->t, &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc =
				    opr_containerof(ndrc, drc_t,
						    d_u.tcp.recycle_k);
				PTHREAD_MUTEX_lock(&tdrc->mtx);	/* LOCKED */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}
			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;

			(void)nfs_dupreq_ref_drc(drc);	/* xprt ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ", drc,
				     drc->refcnt);

			/* Idempotent address, no need for lock;
			 * set once here, never changes.
			 * No other fields are modified.
			 * Assumes address stores are atomic.
			 */
			req->rq_xprt->xp_u2 = (void *)drc;
		}
		break;
	default:
		/* XXX error */
		break;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	PTHREAD_MUTEX_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

out:
	return drc;
}
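The TCP path keys DRCs by peer address: the sockaddr is hashed with a seeded CityHash (the call above), the hash is looked up in the recycle tree, and the DRC is either revived off the recycle queue or freshly allocated and inserted, all under the global state lock. The decision reduced to a sketch (hypothetical helpers; the CityHash64WithSeed declaration mirrors the call above):

#include <stddef.h>
#include <stdint.h>
#include <sys/socket.h>

/* seeded 64-bit CityHash, as used in the fragment above */
uint64_t CityHash64WithSeed(const char *buf, size_t len, uint64_t seed);

struct drc;
struct drc *recycle_lookup(uint64_t hk);	/* assumed */
void recycle_unlink(struct drc *drc);		/* assumed: leave queue */
struct drc *drc_alloc(uint64_t hk);		/* assumed: refcnt = 1 */

/* Find-or-create the DRC for a peer address; the caller holds the
 * global recycle-state lock (DRC_ST_LOCK above). */
struct drc *drc_for_addr(const struct sockaddr_storage *addr)
{
	uint64_t hk = CityHash64WithSeed((const char *)addr,
					 sizeof(*addr), 911);
	struct drc *drc = recycle_lookup(hk);

	if (drc)
		recycle_unlink(drc);	/* revive a parked DRC */
	else
		drc = drc_alloc(hk);	/* first contact from this peer */
	return drc;
}

Keying by address means a client that reconnects gets its old cache back, which is the point of the recycle queue: retransmissions that straddle a reconnect can still be answered from the DRC.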
Code Example #10
/**
 * @brief Completes a request in the cache
 *
 * Completes a cache insertion operation begun in nfs_dupreq_start.
 * The refcnt of the corresponding duplicate request entry is unchanged
 * (ie, the caller must still call nfs_dupreq_rele).
 *
 * In contrast with the prior DRC implementation, completing a request
 * in the current implementation may under normal conditions cause one
 * or more cached requests to be retired.  Requests are retired in the
 * order they were inserted.  The primary retire algorithm is a high
 * water mark combined with a windowing heuristic: one or more requests
 * will be retired if the water mark is exceeded and no duplicate
 * requests have been found in the cache in a configurable window of
 * immediately preceding requests.  A timeout may supplement the water
 * mark in future.
 *
 * req->rq_u1 has either a magic value, or points to a duplicate request
 * cache entry allocated in nfs_dupreq_start.
 *
 * @param[in] req     The request
 * @param[in] res_nfs The response
 *
 * @return DUPREQ_SUCCESS if successful.
 * @return DUPREQ_INSERT_MALLOC_ERROR if an error occurred.
 */
dupreq_status_t nfs_dupreq_finish(struct svc_req *req, nfs_res_t *res_nfs)
{
	dupreq_entry_t *ov = NULL, *dv = (dupreq_entry_t *)req->rq_u1;
	dupreq_status_t status = DUPREQ_SUCCESS;
	struct rbtree_x_part *t;
	drc_t *drc = NULL;

	/* do nothing if req is marked no-cache */
	if (dv == (void *)DUPREQ_NOCACHE)
		goto out;

	/* do nothing if nfs_dupreq_start failed completely */
	if (dv == (void *)DUPREQ_BAD_ADDR1)
		goto out;

	PTHREAD_MUTEX_lock(&dv->mtx);
	dv->res = res_nfs;
	dv->timestamp = time(NULL);
	dv->state = DUPREQ_COMPLETE;
	drc = dv->hin.drc;
	PTHREAD_MUTEX_unlock(&dv->mtx);

	/* cond. remove from q head */
	PTHREAD_MUTEX_lock(&drc->mtx);

	LogFullDebug(COMPONENT_DUPREQ,
		     "completing dv=%p xid=%u on DRC=%p state=%s, status=%s, refcnt=%d",
		     dv, dv->hin.tcp.rq_xid, drc,
		     dupreq_state_table[dv->state], dupreq_status_table[status],
		     dv->refcnt);

	/* ok, do the new retwnd calculation here.  then, put drc only if
	 * we retire an entry */
	if (drc_should_retire(drc)) {
		/* again: */
		ov = TAILQ_FIRST(&drc->dupreq_q);
		if (likely(ov)) {
			/* finished request count against retwnd */
			drc_dec_retwnd(drc);
			/* check refcnt */
			if (ov->refcnt > 0) {
				/* ov still in use, apparently */
				goto unlock;
			}
			/* remove q entry */
			TAILQ_REMOVE(&drc->dupreq_q, ov, fifo_q);
			--(drc->size);

			/* remove dict entry */
			t = rbtx_partition_of_scalar(&drc->xt, ov->hk);
			/* interlock */
			PTHREAD_MUTEX_unlock(&drc->mtx);
			PTHREAD_MUTEX_lock(&t->mtx);	/* partition lock */
			rbtree_x_cached_remove(&drc->xt, t, &ov->rbt_k, ov->hk);
			PTHREAD_MUTEX_unlock(&t->mtx);

			LogDebug(COMPONENT_DUPREQ,
				 "retiring ov=%p xid=%u on DRC=%p state=%s, status=%s, refcnt=%d",
				 ov, ov->hin.tcp.rq_xid,
				 ov->hin.drc, dupreq_state_table[ov->state],
				 dupreq_status_table[status], ov->refcnt);

			/* deep free ov */
			nfs_dupreq_free_dupreq(ov);
			goto out;
		}
	}

 unlock:
	PTHREAD_MUTEX_unlock(&drc->mtx);

 out:
	return status;
}
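The doc comment describes retirement as a high-water mark gated by a window of recent duplicate hits; drc_inc_retwnd() (on a cache hit, in nfs_dupreq_start) and drc_dec_retwnd() (on each retirement, above) maintain that window. The functions themselves are not in this listing; a plausible sketch of the predicate, with hypothetical field names:

#include <stdbool.h>

struct drc_sketch {
	unsigned size;		/* entries currently cached */
	unsigned maxsize;	/* hard cap */
	unsigned hiwat;		/* soft high-water mark */
	int retwnd;		/* > 0 while recent duplicate hits
				 * should suppress retirement */
};

/* Retire when over the hard cap; otherwise only once the cache is
 * past the high-water mark AND the duplicate-hit window has drained. */
static bool should_retire(const struct drc_sketch *drc)
{
	if (drc->size > drc->maxsize)
		return true;
	if (drc->retwnd > 0)
		return false;
	return drc->size > drc->hiwat;
}

The effect is that a workload currently being served duplicates from the cache holds retirement off, while an idle-of-duplicates cache drains back toward the high-water mark one entry per completion.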
Code Example #11
File: nfs_dupreq.c  Project: JasonZen/nfs-ganesha
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * @param[in] req  The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	gsh_xprt_private_t *xu = (gsh_xprt_private_t *) req->rq_xprt->xp_u1;
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		pthread_mutex_lock(&req->rq_xprt->xp_lock);
		if (xu->drc) {
			drc = xu->drc;
			LogFullDebug(COMPONENT_DUPREQ, "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			pthread_mutex_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));

			drc_k.type = dtype;
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[512];
				sprint_sockaddr(&drc_k.d_u.tcp.addr, str, 512);
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc =
			    opr_rbtree_lookup(&t->t, &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc =
				    opr_containerof(ndrc, drc_t,
						    d_u.tcp.recycle_k);
				pthread_mutex_lock(&tdrc->mtx);	/* LOCKED */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}
			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				pthread_mutex_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;
			/* xprt drc */
			(void)nfs_dupreq_ref_drc(drc);	/* xu ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ", drc,
				     drc->refcnt);

			xu->drc = drc;
		}
		pthread_mutex_unlock(&req->rq_xprt->xp_lock);
		break;
	default:
		/* XXX error */
		break;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	pthread_mutex_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

out:
	return drc;
}
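Compared with code example #9, which publishes the DRC with a single idempotent store into xp_u2, this older variant serializes the per-transport slot with xp_lock around both the fast path and the slow path. The underlying idiom, lazily caching an expensive object in per-connection private data, looks like this in isolation (hypothetical types):

#include <pthread.h>

struct conn_private {
	pthread_mutex_t lock;	/* plays the role of xp_lock */
	void *cached;		/* plays the role of xu->drc */
};

void *expensive_create(void);	/* assumed factory */

/* Return the per-connection object, creating it at most once.
 * Holding the lock across the check and the store is what makes
 * the create-at-most-once guarantee hold under concurrency. */
void *conn_get_cached(struct conn_private *xu)
{
	void *obj;

	pthread_mutex_lock(&xu->lock);
	obj = xu->cached;
	if (!obj) {
		obj = expensive_create();
		xu->cached = obj;
	}
	pthread_mutex_unlock(&xu->lock);
	return obj;
}

Example #9's lock-free read of xp_u2 trades this simplicity for less contention, at the cost of the "assumes address stores are atomic" caveat noted in its comments.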