/**
 * @brief Check for expired TCP DRCs.
 */
static inline void drc_free_expired(void)
{
	drc_t *drc;
	time_t now = time(NULL);
	struct rbtree_x_part *t;
	struct opr_rbtree_node *odrc = NULL;

	DRC_ST_LOCK();

	if ((drc_st->tcp_drc_recycle_qlen < 1) ||
	    (now - drc_st->last_expire_check) < 600) /* 10m */
		goto unlock;

	do {
		drc = TAILQ_FIRST(&drc_st->tcp_drc_recycle_q);
		if (drc && (drc->d_u.tcp.recycle_time > 0)
		    && ((now - drc->d_u.tcp.recycle_time) >
			drc_st->expire_delta) && (drc->refcnt == 0)) {
			LogFullDebug(COMPONENT_DUPREQ,
				     "remove expired drc %p from recycle queue",
				     drc);
			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc->d_u.tcp.hk);

			odrc =
			    opr_rbtree_lookup(&t->t, &drc->d_u.tcp.recycle_k);
			if (!odrc) {
				LogCrit(COMPONENT_DUPREQ,
					"BUG: DRC on recycle queue but not in recycle tree");
			} else {
				(void)opr_rbtree_remove(&t->t,
						&drc->d_u.tcp.recycle_k);
			}
			TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q, drc,
				     d_u.tcp.recycle_q);
			--(drc_st->tcp_drc_recycle_qlen);
			/* expect DRC to be reachable from some xprt(s) */
			PTHREAD_MUTEX_lock(&drc->mtx);
			drc->flags &= ~DRC_FLAG_RECYCLE;
			/* but if not, dispose it */
			if (drc->refcnt == 0) {
				PTHREAD_MUTEX_unlock(&drc->mtx);
				free_tcp_drc(drc);
				continue;
			}
			PTHREAD_MUTEX_unlock(&drc->mtx);
		} else {
			LogFullDebug(COMPONENT_DUPREQ,
				     "drc %p at head of recycle queue not yet expired; ending expire check",
				     drc);
			drc_st->last_expire_check = now;
			break;
		}

	} while (1);

 unlock:
	DRC_ST_UNLOCK();
}
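/*
 * Usage note: drc_free_expired() is not timer-driven; it is invoked from the
 * DRC acquisition path (see drc_check_expired in nfs_dupreq_get_drc below),
 * so expiry work is amortized over new connection arrivals.  A minimal
 * self-contained sketch of the same head-of-queue sweep, with hypothetical
 * types not taken from this source:
 */
#include <stdlib.h>
#include <sys/queue.h>
#include <time.h>

struct stamped {
	time_t recycle_time;
	TAILQ_ENTRY(stamped) q;
};

TAILQ_HEAD(stampq, stamped);

/* Entries are queued in recycle order, so the first unexpired entry ends
 * the scan, just as the break in drc_free_expired() does. */
static void sweep_expired(struct stampq *queue, time_t expire_delta)
{
	time_t now = time(NULL);
	struct stamped *s;

	while ((s = TAILQ_FIRST(queue)) != NULL) {
		if ((now - s->recycle_time) <= expire_delta)
			break;
		TAILQ_REMOVE(queue, s, q);
		free(s);
	}
}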
Example #2
/**
 * @brief Release previously-ref'd DRC.
 *
 * Release previously-ref'd DRC.  If its refcnt drops to 0, the DRC
 * is queued for later recycling.
 *
 * @param[in] xprt  The SVCXPRT associated with DRC, if applicable
 * @param[in] drc   The DRC
 * @param[in] flags Control flags
 */
void nfs_dupreq_put_drc(SVCXPRT *xprt, drc_t *drc, uint32_t flags)
{
	if (!(flags & DRC_FLAG_LOCKED))
		pthread_mutex_lock(&drc->mtx);
	/* drc LOCKED */

	if (drc->refcnt == 0) {
		LogCrit(COMPONENT_DUPREQ,
			"drc %p refcnt will underrun " "refcnt=%u", drc,
			drc->refcnt);
	}

	nfs_dupreq_unref_drc(drc);

	LogFullDebug(COMPONENT_DUPREQ, "drc %p refcnt==%u", drc, drc->refcnt);

	switch (drc->type) {
	case DRC_UDP_V234:
		/* do nothing */
		break;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		if (drc->refcnt == 0) {
			if (!(drc->flags & DRC_FLAG_RECYCLE)) {
				/* note t's lock order wrt drc->mtx is
				 * the opposite of drc->xt[*].lock */
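				/* hence drc->mtx is dropped before
				 * DRC_ST_LOCK is taken below, matching the
				 * DRC_ST_LOCK -> drc->mtx order used by
				 * drc_free_expired() */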
				drc->d_u.tcp.recycle_time = time(NULL);
				drc->flags |= DRC_FLAG_RECYCLE;
				pthread_mutex_unlock(&drc->mtx); /* !LOCKED */
				DRC_ST_LOCK();
				TAILQ_INSERT_TAIL(&drc_st->tcp_drc_recycle_q,
						  drc, d_u.tcp.recycle_q);
				++(drc_st->tcp_drc_recycle_qlen);
				LogFullDebug(COMPONENT_DUPREQ,
					     "enqueue drc %p for recycle", drc);
				DRC_ST_UNLOCK();
				goto out;
			}
		}
		break;
	default:
		break;
	}

	pthread_mutex_unlock(&drc->mtx); /* !LOCKED */
out:
	return;
}
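/*
 * Pairing sketch (hypothetical caller, not part of this source): a request
 * path that takes a call-path reference with nfs_dupreq_get_drc() is
 * expected to drop it with nfs_dupreq_put_drc() once the request retires.
 * Passing flags without DRC_FLAG_LOCKED lets put take drc->mtx itself.
 */
static void retire_request(struct svc_req *req)
{
	drc_t *drc = nfs_dupreq_get_drc(req);

	if (drc == NULL)
		return;

	/* ... duplicate-request lookup/insert against drc here ... */

	nfs_dupreq_put_drc(req->rq_xprt, drc, 0 /* not DRC_FLAG_LOCKED */);
}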
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * @param[in] req  The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		/* Idempotent address, no need for lock;
		 * xprt will be valid as long as svc_req.
		 */
		drc = (drc_t *)req->rq_xprt->xp_u2;
		if (drc) {
			/* found, no danger of removal */
			LogFullDebug(COMPONENT_DUPREQ, "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));
			drc_k.type = dtype;

			/* Since the drc can last longer than the xprt,
			 * copy the address. Read operation of constant data,
			 * no xprt lock required.
			 */
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[SOCK_NAME_MAX];

				sprint_sockaddr(&drc_k.d_u.tcp.addr,
						str, sizeof(str));
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc =
			    opr_rbtree_lookup(&t->t, &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc =
				    opr_containerof(ndrc, drc_t,
						    d_u.tcp.recycle_k);
				PTHREAD_MUTEX_lock(&tdrc->mtx);	/* LOCKED */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}
			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;

			(void)nfs_dupreq_ref_drc(drc);	/* xprt ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ", drc,
				     drc->refcnt);

			/* Idempotent address, no need for lock;
			 * set once here, never changes.
			 * No other fields are modified.
			 * Assumes address stores are atomic.
			 */
			req->rq_xprt->xp_u2 = (void *)drc;
		}
		break;
	default:
		/* XXX error */
		break;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	PTHREAD_MUTEX_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

out:
	return drc;
}
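/*
 * The TCP branch above is a lookup-or-insert keyed by a CityHash64 of the
 * client address, with the first reference published before the table lock
 * is dropped.  A minimal model of that step, using only standard C and
 * hypothetical names (a chained hash table stands in for the rbtree
 * partitions):
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#define NBUCKET 64

struct centry {
	uint64_t hk;		/* hash of the client address */
	uint32_t refcnt;
	struct centry *next;
};

static struct centry *buckets[NBUCKET];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct centry *get_entry(uint64_t hk)
{
	struct centry *e;

	pthread_mutex_lock(&table_lock);
	for (e = buckets[hk % NBUCKET]; e != NULL; e = e->next)
		if (e->hk == hk)
			break;
	if (e == NULL) {
		e = calloc(1, sizeof(*e));
		if (e == NULL) {
			pthread_mutex_unlock(&table_lock);
			return NULL;
		}
		e->hk = hk;
		e->next = buckets[hk % NBUCKET];
		buckets[hk % NBUCKET] = e;
	}
	e->refcnt++;		/* ref taken before the lock drops, so the
				 * entry cannot be recycled underneath us */
	pthread_mutex_unlock(&table_lock);
	return e;
}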
Example #4
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * @param[in] req  The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	gsh_xprt_private_t *xu = (gsh_xprt_private_t *) req->rq_xprt->xp_u1;
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		pthread_mutex_lock(&req->rq_xprt->xp_lock);
		if (xu->drc) {
			drc = xu->drc;
			LogFullDebug(COMPONENT_DUPREQ, "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			pthread_mutex_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));

			drc_k.type = dtype;
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[512];

				sprint_sockaddr(&drc_k.d_u.tcp.addr, str,
						sizeof(str));
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc =
			    opr_rbtree_lookup(&t->t, &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc =
				    opr_containerof(ndrc, drc_t,
						    d_u.tcp.recycle_k);
				pthread_mutex_lock(&tdrc->mtx);	/* LOCKED */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}
			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				pthread_mutex_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;
			/* xprt drc */
			(void)nfs_dupreq_ref_drc(drc);	/* xu ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ", drc,
				     drc->refcnt);

			xu->drc = drc;
		}
		pthread_mutex_unlock(&req->rq_xprt->xp_lock);
		break;
	default:
		/* XXX error */
		break;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	pthread_mutex_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

out:
	return drc;
}
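/*
 * Variant note (sketch with hypothetical types): this version publishes the
 * per-connection DRC through the transport's private data under xp_lock,
 * where the previous version relied on a single idempotent store to
 * req->rq_xprt->xp_u2.  The caching pattern in miniature:
 */
#include <pthread.h>

struct xprt_priv {
	pthread_mutex_t lock;	/* stands in for xp_lock */
	void *drc;		/* stands in for xu->drc */
};

static void *cache_drc(struct xprt_priv *xu, void *(*build)(void))
{
	void *d;

	pthread_mutex_lock(&xu->lock);
	d = xu->drc;
	if (d == NULL) {
		d = build();	/* look up or allocate the DRC once */
		xu->drc = d;	/* publish while still holding the lock */
	}
	pthread_mutex_unlock(&xu->lock);
	return d;
}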