Example #1
int
insertNode(struct opr_rbtree *head, int value)
{
    struct intnode *inode;
    struct opr_rbtree_node *parent, **childptr;

    inode = malloc(sizeof(struct intnode));
    if (! inode)
        return -1;
    inode->value = value;

    childptr = &head->root;
    parent = NULL;

    /* walk down to the insertion point, tracking the parent link */
    while (*childptr) {
        struct intnode *tnode;

        parent = *childptr;
        tnode = opr_containerof(parent, struct intnode, node);

        if (value < tnode->value)
            childptr = &(*childptr)->left;
        else if (value > tnode->value)
            childptr = &(*childptr)->right;
        else {
            /* duplicate key: release the unused node */
            free(inode);
            return -1;
        }
    }
    opr_rbtree_insert(head, parent, childptr, &inode->node);
    return 0;
}
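
The example assumes an intrusive record type whose rbtree linkage is embedded in the record itself; opr_containerof maps from the embedded node back to the record. A minimal sketch of that layout plus a hypothetical driver (the struct intnode field names are implied by the example; example_usage is illustrative only):

/* sketch of the intrusive layout assumed by insertNode() */
struct intnode {
    int value;                   /* key compared during descent */
    struct opr_rbtree_node node; /* embedded rbtree linkage */
};

/* hypothetical driver: duplicates are rejected with -1 */
void
example_usage(struct opr_rbtree *head)
{
    int keys[] = { 5, 3, 9, 3 }; /* the second 3 is a duplicate */
    unsigned int i;

    for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
        (void) insertNode(head, keys[i]);
}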
Example #2
struct rpc_dplx_rec *
rpc_dplx_lookup_rec(int fd, uint32_t iflags, uint32_t *oflags)
{
    struct rbtree_x_part *t;
    struct rpc_dplx_rec rk, *rec = NULL;
    struct opr_rbtree_node *nv;

    cond_init_rpc_dplx();

    rk.fd_k = fd;
    t = rbtx_partition_of_scalar(&(rpc_dplx_rec_set.xt), fd);

    rwlock_rdlock(&t->lock);
    nv = opr_rbtree_lookup(&t->t, &rk.node_k);

    /* XXX rework lock+insert case, so that new entries are inserted
     * locked, and t->lock critical section is reduced */

    if (! nv) {
        rwlock_unlock(&t->lock);
        rwlock_wrlock(&t->lock);
        nv = opr_rbtree_lookup(&t->t, &rk.node_k);
        if (! nv) {
            rec = alloc_dplx_rec();
            if (! rec) {
                __warnx(TIRPC_DEBUG_FLAG_LOCK,
                        "%s: failed allocating rpc_dplx_rec", __func__);
                goto unlock;
            }

            /* tell the caller */
            *oflags = RPC_DPLX_LKP_OFLAG_ALLOC;

            rec->fd_k = fd;

            if (opr_rbtree_insert(&t->t, &rec->node_k)) {
                /* can't happen: the write lock is held and the key
                 * was just confirmed absent */
                __warnx(TIRPC_DEBUG_FLAG_LOCK,
                        "%s: collision inserting in locked rbtree partition",
                        __func__);
                free_dplx_rec(rec);
                rec = NULL;
                goto unlock;
            }
        }
        else {
            /* lost the race while the lock was dropped; use the
             * entry another thread inserted */
            rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
            *oflags = RPC_DPLX_LKP_FLAG_NONE;
        }
    }
    else {
        rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
        *oflags = RPC_DPLX_LKP_FLAG_NONE;
    }

    rpc_dplx_ref(rec, (iflags & RPC_DPLX_LKP_IFLAG_LOCKREC) ?
                 RPC_DPLX_FLAG_LOCK :
                 RPC_DPLX_FLAG_NONE);

unlock:
    rwlock_unlock(&t->lock);

    return (rec);
}
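
Example #2 is an instance of the classic read-mostly upgrade pattern: look up under the read lock, and on a miss drop it, take the write lock, and look up again, because another thread may have inserted the key in the unlocked window. Condensed to its essentials (the table_* and make_object helpers are hypothetical stand-ins for the rbtree partition calls):

#include <pthread.h>
#include <stdint.h>

/* hypothetical helpers standing in for the partitioned-tree calls */
extern void *table_find(uint64_t key);
extern void *make_object(uint64_t key);
extern void table_insert(uint64_t key, void *obj);

void *
lookup_or_create(pthread_rwlock_t *lock, uint64_t key)
{
    void *obj;

    pthread_rwlock_rdlock(lock);
    obj = table_find(key);        /* fast path under the read lock */
    pthread_rwlock_unlock(lock);
    if (obj)
        return obj;

    pthread_rwlock_wrlock(lock);
    obj = table_find(key);        /* re-check: we may have lost a race */
    if (! obj) {
        obj = make_object(key);
        table_insert(key, obj);   /* safe: write lock held */
    }
    pthread_rwlock_unlock(lock);
    return obj;
}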
Example #3
rpc_ctx_t *
alloc_rpc_call_ctx(CLIENT *clnt, rpcproc_t proc, xdrproc_t xdr_args,
                   void *args_ptr, xdrproc_t xdr_results, void *results_ptr,
                   struct timeval timeout)
{
    struct x_vc_data *xd = (struct x_vc_data *) clnt->cl_p1;
    struct rpc_dplx_rec *rec = xd->rec;
    rpc_ctx_t *ctx;

    ctx = mem_alloc(sizeof(rpc_ctx_t));
    if (! ctx)
        goto out;

    /* protects this */
    mutex_init(&ctx->we.mtx, NULL);
    cond_init(&ctx->we.cv, 0, NULL);

    /* rec->calls and rbtree protected by (adaptive) mtx */
    mutex_lock(&rec->mtx);

    /* XXX we hold the client-fd lock */
    ctx->xid = ++(xd->cx.calls.xid);

    /* some of this looks like overkill; it's here to support future,
     * fully async calls */
    ctx->ctx_u.clnt.clnt = clnt;
    ctx->ctx_u.clnt.timeout.tv_sec = 0;
    ctx->ctx_u.clnt.timeout.tv_nsec = 0;
    timespec_addms(&ctx->ctx_u.clnt.timeout, tv_to_ms(&timeout));
    ctx->msg = alloc_rpc_msg();
    ctx->flags = 0;

    /* stash it */
    if (opr_rbtree_insert(&xd->cx.calls.t, &ctx->node_k)) {
        __warnx(TIRPC_DEBUG_FLAG_RPC_CTX,
                "%s: call ctx insert failed (xid %d client %p)",
                __func__,
                ctx->xid, clnt);
        mutex_unlock(&rec->mtx);
        mem_free(ctx, sizeof(rpc_ctx_t));
        ctx = NULL;
        goto out;
    }

    mutex_unlock(&rec->mtx);

out:
    return (ctx);
}
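
Since the context embeds its own mutex, condition variable, and tree linkage, a matching teardown has to undo each of those in turn. A sketch of the reverse path, assuming the library pairs alloc_rpc_msg() with a free_rpc_msg() and provides the usual mutex_destroy()/cond_destroy() wrappers (illustrative, not the library's own code):

void
free_rpc_call_ctx_sketch(rpc_ctx_t *ctx)
{
    struct x_vc_data *xd = (struct x_vc_data *) ctx->ctx_u.clnt.clnt->cl_p1;
    struct rpc_dplx_rec *rec = xd->rec;

    /* unlink from the per-connection call tree, keyed by xid */
    mutex_lock(&rec->mtx);
    opr_rbtree_remove(&xd->cx.calls.t, &ctx->node_k);
    mutex_unlock(&rec->mtx);

    free_rpc_msg(ctx->msg);       /* assumed pair of alloc_rpc_msg */
    cond_destroy(&ctx->we.cv);
    mutex_destroy(&ctx->we.mtx);
    mem_free(ctx, sizeof(rpc_ctx_t));
}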
Example #4
void rpc_ctx_next_xid(rpc_ctx_t *ctx, uint32_t flags)
{
    struct x_vc_data *xd = (struct x_vc_data *) ctx->ctx_u.clnt.clnt->cl_p1;
    struct rpc_dplx_rec *rec = xd->rec;

    assert (flags & RPC_CTX_FLAG_LOCKED);

    mutex_lock(&rec->mtx);
    opr_rbtree_remove(&xd->cx.calls.t, &ctx->node_k);
    ctx->xid = ++(xd->cx.calls.xid);
    if (opr_rbtree_insert(&xd->cx.calls.t, &ctx->node_k)) {
        mutex_unlock(&rec->mtx);
        __warnx(TIRPC_DEBUG_FLAG_RPC_CTX,
                "%s: call ctx insert failed (xid %d client %p)",
                __func__,
                ctx->xid,
                ctx->ctx_u.clnt.clnt);
        goto out;
    }
    mutex_unlock(&rec->mtx);
out:
    return;
}
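
The order of operations in Example #4 matters: the node is removed from the tree before its key (the xid) changes, and re-inserted only afterwards, because an intrusive red-black tree places a node by key at insert time; mutating the key of a still-linked node would silently corrupt the ordering invariant. The same pattern in miniature, with a hypothetical record type:

/* illustrative re-key pattern for an intrusive rbtree node */
struct kv_rec {
    uint32_t key;                  /* the tree's sort key */
    struct opr_rbtree_node node_k; /* embedded linkage */
};

static void
rekey(struct opr_rbtree *tree, struct kv_rec *r, uint32_t new_key)
{
    opr_rbtree_remove(tree, &r->node_k);        /* unlink first */
    r->key = new_key;                           /* safe: detached */
    (void) opr_rbtree_insert(tree, &r->node_k); /* relink in order */
}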
Example #5
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * @param[in] req  The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		/* Idempotent address, no need for lock;
		 * xprt will be valid as long as svc_req.
		 */
		drc = (drc_t *)req->rq_xprt->xp_u2;
		if (drc) {
			/* found, no danger of removal */
			LogFullDebug(COMPONENT_DUPREQ, "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));
			drc_k.type = dtype;

			/* Since the drc can last longer than the xprt,
			 * copy the address. Read operation of constant data,
			 * no xprt lock required.
			 */
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[SOCK_NAME_MAX];

				sprint_sockaddr(&drc_k.d_u.tcp.addr,
						str, sizeof(str));
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc =
			    opr_rbtree_lookup(&t->t, &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc =
				    opr_containerof(ndrc, drc_t,
						    d_u.tcp.recycle_k);
				PTHREAD_MUTEX_lock(&tdrc->mtx);	/* LOCKED */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}
			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;

			(void)nfs_dupreq_ref_drc(drc);	/* xprt ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ", drc,
				     drc->refcnt);

			/* Idempotent address, no need for lock;
			 * set once here, never changes.
			 * No other fields are modified.
			 * Assumes address stores are atomic.
			 */
			req->rq_xprt->xp_u2 = (void *)drc;
		}
		break;
	default:
		/* XXX error: unknown DRC type; drc is still NULL, so
		 * skip the shared ref/unlock path */
		goto out;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	PTHREAD_MUTEX_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

out:
	return drc;
}
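
Both TCP variants key the DRC by a 64-bit CityHash of the peer's socket address and use that hash to select one partition of the partitioned recycle tree, so lookups contend only on a per-partition lock. The addressing step, condensed (helper names are taken from the example; the wrapper function itself is hypothetical):

static struct rbtree_x_part *
drc_partition_of_addr(sockaddr_t *addr, uint64_t *hk)
{
	/* same seed (911) as the examples above */
	*hk = CityHash64WithSeed((char *) addr, sizeof(sockaddr_t), 911);
	return rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t, *hk);
}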
Example #6
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * @param[in] req  The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	gsh_xprt_private_t *xu = (gsh_xprt_private_t *) req->rq_xprt->xp_u1;
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		pthread_mutex_lock(&req->rq_xprt->xp_lock);
		if (xu->drc) {
			drc = xu->drc;
			LogFullDebug(COMPONENT_DUPREQ, "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			pthread_mutex_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));

			drc_k.type = dtype;
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[512];

				sprint_sockaddr(&drc_k.d_u.tcp.addr,
						str, sizeof(str));
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc =
			    opr_rbtree_lookup(&t->t, &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc =
				    opr_containerof(ndrc, drc_t,
						    d_u.tcp.recycle_k);
				pthread_mutex_lock(&tdrc->mtx);	/* LOCKED */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}
			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				pthread_mutex_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;
			/* xprt drc */
			(void)nfs_dupreq_ref_drc(drc);	/* xu ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ", drc,
				     drc->refcnt);

			xu->drc = drc;
		}
		pthread_mutex_unlock(&req->rq_xprt->xp_lock);
		break;
	default:
		/* XXX error: unknown DRC type; drc is still NULL, so
		 * skip the shared ref/unlock path */
		goto out;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	pthread_mutex_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

out:
	return drc;
}
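
Examples #5 and #6 appear to be two revisions of the same function. The older variant (#6) caches the DRC pointer in the transport's private data (xu->drc) and brackets the whole TCP branch with pthread_mutex_lock(&req->rq_xprt->xp_lock); the newer one (#5) publishes the pointer once through req->rq_xprt->xp_u2 and reads it lock-free, relying on the store being set-once and atomic, which removes a per-request mutex acquisition from the fast path.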